kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_REGEX_LOCK(opsname)        \
        .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
#else
#define INIT_REGEX_LOCK(opsname)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop __read_mostly;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_notrace(list);                 \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))

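/*
 * Initialize the regex_lock of an ftrace_ops on first use.  Ops that
 * were not initialized at compile time (e.g. dynamically allocated
 * ones) get their mutex set up here, exactly once.
 */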
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->regex_lock);
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
        struct ftrace_ops *ops;
        int cnt = 0;

        mutex_lock(&ftrace_lock);

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next)
                cnt++;

        mutex_unlock(&ftrace_lock);

        return cnt;
}

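/*
 * Callback used when more than one ops is on the global list:
 * guard against recursion, then invoke every ops on
 * ftrace_global_list in turn.
 */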
static void
ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
{
        int bit;

        bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
        if (bit < 0)
                return;

        do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
        } while_for_each_ftrace_op(op);

        trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before in-flight callers
 * actually stop, as no synchronization is done here.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void control_ops_free(struct ftrace_ops *ops)
{
        free_percpu(ops->disabled);
}

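/*
 * Recompute global_ops.func: call a lone callback directly, fall back
 * to ftrace_global_list_func when several are registered, and wrap
 * the result with the pid filter when set_ftrace_pid is in use.
 */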
static void update_global_ops(void)
{
        ftrace_func_t func;

        /*
         * If there's only one function registered, then call that
         * function directly. Otherwise, we need to iterate over the
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
            ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
                /*
                 * As we are calling the function directly, if it does
                 * not have recursion protection, global_ops must not
                 * claim to be recursion safe either, so mirror the flag.
                 */
                if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
                        global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
                else
                        global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
        } else {
                func = ftrace_global_list_func;
                /* The list has its own recursion protection. */
                global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
        }


        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        }

        global_ops.func = func;
}

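/*
 * Recompute ftrace_trace_function, the function the mcount trampoline
 * calls: a single ops' callback when it can safely be called directly,
 * otherwise ftrace_ops_list_func, which iterates over all registered ops.
 */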
static void update_ftrace_function(void)
{
        ftrace_func_t func;

        update_global_ops();

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
             !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
             (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
             !FTRACE_FORCE_LIST_FUNC)) {
                /* Set the ftrace_ops that the arch callback uses */
                if (ftrace_ops_list == &global_ops)
                        function_trace_op = ftrace_global_list;
                else
                        function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
        } else {
                /* Just use the default ftrace_ops */
                function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list, but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included in the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

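/*
 * Link @ops into the proper list: the global list, the control list,
 * or the plain ftrace_ops_list.  Ops that do not live in core kernel
 * data are flagged FTRACE_OPS_FL_DYNAMIC so that unregistering knows
 * to wait for in-flight callers before they can be freed.
 */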
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (unlikely(ftrace_disabled))
                return -ENODEV;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

        /* We don't support both control and global flags set. */
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

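/*
 * Undo __register_ftrace_function(): unlink @ops from its list and,
 * for control and dynamic ops, wait for all current callers to finish
 * before the memory they use may be released.
 */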
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (ftrace_disabled)
                return -ENODEV;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
                ret = remove_ftrace_list_ops(&ftrace_global_list,
                                             &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
        } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
                if (!ret) {
                        /*
                         * The ftrace_ops is now removed from the list,
                         * so there'll be no new users. We must ensure
                         * all current users are done before we free
                         * the control data.
                         */
                        synchronize_sched();
                        control_ops_free(ops);
                }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                synchronize_sched();

        return 0;
}

static void ftrace_update_pid_func(void)
{
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

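/*
 * Iterator step for the stat interface: advance to the next profile
 * record, crossing page boundaries and skipping records that have
 * never been hit.
 */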
static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* without function graph, compare on hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);

        /* Sample standard deviation (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                stddev = rec->time_squared - rec->counter * avg * avg;
                /*
                 * Divide by only 1000 for the ns^2 -> us^2 conversion;
                 * trace_print_graph_duration() will divide by 1000 again.
                 */
                do_div(stddev, (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

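/* Zero out all profile records and the hash table of @stat for reuse. */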
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

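/*
 * Preallocate enough pages to hold one profile record per function we
 * expect to see.  Returns 0 on success or -ENOMEM, with any partial
 * allocation freed, on failure.
 */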
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. From past experience
         * we have around 20K functions, so that should be more than
         * enough. It is highly unlikely that we will execute every
         * function in the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_online_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

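/*
 * The profiler's function entry callback: look up (or allocate) the
 * record for @ip and bump its hit counter.  Interrupts are disabled
 * while the per-cpu stat structure is in use.
 */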
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
}

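/*
 * Function graph return callback: charge the time spent in the
 * function (excluding child time unless the graph-time option is set)
 * to the record's time and time_squared totals.
 */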
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zeroed, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

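/*
 * Handle writes to the function_profile_enabled file: a non-zero
 * value allocates the per-cpu stats and registers the profiler, zero
 * unregisters it.
 */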
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler() calls stop_machine(),
                         * so this acts like a synchronize_sched().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent; if something
                         * goes wrong here, we still do not free the
                         * memory already in use.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

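/*
 * lseek for the filter files: defer to seq_lseek() when the file was
 * opened for reading, otherwise just reset the position.
 */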
loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, whence);
        else
                file->f_pos = ret = 1;

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct list_head        free_list;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read-only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_REGEX_LOCK(global_ops)
};

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

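/*
 * Look up @ip in @hash.  Returns the matching entry, or NULL if the
 * hash is empty or @ip is not in it.
 */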
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

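/*
 * Replace the hash pointed to by @dst with the contents of @src.  The
 * function records are disabled before and re-enabled after the swap
 * so they always reflect a consistent hash.
 */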
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable(ops, enable);

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                goto out;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);

        ret = 0;
 out:
        /*
         * Enable regardless of ret:
         *  On success, we enable the new hash.
         *  On failure, we re-enable the original hash.
         */
        ftrace_hash_rec_enable(ops, enable);

        return ret;
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
        struct ftrace_hash *filter_hash;
        struct ftrace_hash *notrace_hash;
        int ret;

        filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
        notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);

        if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
            (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for loop. Do not use 'break' to break out of it;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }

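/*
 * bsearch() comparator for dyn_ftrace records.  The key's flags field
 * is overloaded to hold the end of the range being searched (see
 * ftrace_location_range()).
 */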
static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return rec->ip;
        }

        return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the @ip given is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(void *start, void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->notrace_hash;
                other_hash = ops->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, we want to match all functions
                         * that are in the hash but not in the other hash.
                         *
                         * If filter_hash is not set, then we are decrementing.
                         * That means we match anything that is in the hash
                         * and also in the other_hash. That is, we need to turn
                         * off functions in the other hash because they are
                         * disabled by this hash.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
                                return;
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                         */
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
                                return;
                        rec->flags--;
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
                        return;
        } while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
                                    int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
                                   int filter_hash)
{
        __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
1650 void ftrace_bug(int failed, unsigned long ip)
1651 {
1652         switch (failed) {
1653         case -EFAULT:
1654                 FTRACE_WARN_ON_ONCE(1);
1655                 pr_info("ftrace faulted on modifying ");
1656                 print_ip_sym(ip);
1657                 break;
1658         case -EINVAL:
1659                 FTRACE_WARN_ON_ONCE(1);
1660                 pr_info("ftrace failed to modify ");
1661                 print_ip_sym(ip);
1662                 print_ip_ins(" actual: ", (unsigned char *)ip);
1663                 printk(KERN_CONT "\n");
1664                 break;
1665         case -EPERM:
1666                 FTRACE_WARN_ON_ONCE(1);
1667                 pr_info("ftrace faulted on writing ");
1668                 print_ip_sym(ip);
1669                 break;
1670         default:
1671                 FTRACE_WARN_ON_ONCE(1);
1672                 pr_info("ftrace faulted on unknown error ");
1673                 print_ip_sym(ip);
1674         }
1675 }
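
/*
 * As an illustration, the arch-level modify helpers are expected to be
 * paired with ftrace_bug() roughly like this (the same pattern appears
 * in ftrace_code_disable() below):
 *
 *	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 *	if (ret)
 *		ftrace_bug(ret, rec->ip);
 */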
1676
1677 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1678 {
1679         unsigned long flag = 0UL;
1680
1681         /*
1682          * If we are updating calls:
1683          *
1684          *   If the record has a ref count, then we need to enable it
1685          *   because someone is using it.
1686          *
1687          *   Otherwise we make sure it's disabled.
1688          *
1689          * If we are disabling calls, then disable all records that
1690          * are enabled.
1691          */
1692         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1693                 flag = FTRACE_FL_ENABLED;
1694
1695         /*
1696          * If enabling and the REGS flag does not match the REGS_EN, then
1697          * do not ignore this record. Set flags to fail the compare against
1698          * ENABLED.
1699          */
1700         if (flag &&
1701             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1702                 flag |= FTRACE_FL_REGS;
1703
1704         /* If the state of this record hasn't changed, then do nothing */
1705         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1706                 return FTRACE_UPDATE_IGNORE;
1707
1708         if (flag) {
1709                 /* Save off if rec is being enabled (for return value) */
1710                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1711
1712                 if (update) {
1713                         rec->flags |= FTRACE_FL_ENABLED;
1714                         if (flag & FTRACE_FL_REGS) {
1715                                 if (rec->flags & FTRACE_FL_REGS)
1716                                         rec->flags |= FTRACE_FL_REGS_EN;
1717                                 else
1718                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1719                         }
1720                 }
1721
1722                 /*
1723                  * If this record is being updated from a nop, then
1724                  *   return UPDATE_MAKE_CALL.
1725                  * Otherwise, if the EN flag is set, then return
1726                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1727                  *   from the non-save regs, to a save regs function.
1728                  * Otherwise,
1729                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1730                  *   from the save regs, to a non-save regs function.
1731                  */
1732                 if (flag & FTRACE_FL_ENABLED)
1733                         return FTRACE_UPDATE_MAKE_CALL;
1734                 else if (rec->flags & FTRACE_FL_REGS_EN)
1735                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1736                 else
1737                         return FTRACE_UPDATE_MODIFY_CALL;
1738         }
1739
1740         if (update) {
1741                 /* If there's no more users, clear all flags */
1742                 if (!(rec->flags & ~FTRACE_FL_MASK))
1743                         rec->flags = 0;
1744                 else
1745                         /* Just disable the record (keep REGS state) */
1746                         rec->flags &= ~FTRACE_FL_ENABLED;
1747         }
1748
1749         return FTRACE_UPDATE_MAKE_NOP;
1750 }
1751
1752 /**
1753  * ftrace_update_record, set whether a record is tracing or not
1754  * @rec: the record to update
1755  * @enable: set to 1 if the record is tracing, zero to force disable
1756  *
1757  * The records that represent all functions that can be traced need
1758  * to be updated when tracing has been enabled.
1759  */
1760 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1761 {
1762         return ftrace_check_record(rec, enable, 1);
1763 }
1764
1765 /**
1766  * ftrace_test_record, check if the record has been enabled or not
1767  * @rec: the record to test
1768  * @enable: set to 1 to check if enabled, 0 if it is disabled
1769  *
1770  * The arch code may need to test if a record is already set to
1771  * tracing to determine how to modify the function code that it
1772  * represents.
1773  */
1774 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1775 {
1776         return ftrace_check_record(rec, enable, 0);
1777 }
1778
1779 static int
1780 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1781 {
1782         unsigned long ftrace_old_addr;
1783         unsigned long ftrace_addr;
1784         int ret;
1785
1786         ret = ftrace_update_record(rec, enable);
1787
1788         if (rec->flags & FTRACE_FL_REGS)
1789                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1790         else
1791                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1792
1793         switch (ret) {
1794         case FTRACE_UPDATE_IGNORE:
1795                 return 0;
1796
1797         case FTRACE_UPDATE_MAKE_CALL:
1798                 return ftrace_make_call(rec, ftrace_addr);
1799
1800         case FTRACE_UPDATE_MAKE_NOP:
1801                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1802
1803         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1804         case FTRACE_UPDATE_MODIFY_CALL:
1805                 if (rec->flags & FTRACE_FL_REGS)
1806                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1807                 else
1808                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1809
1810                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1811         }
1812
1813         return -1; /* unknown ftrace bug */
1814 }
1815
1816 void __weak ftrace_replace_code(int enable)
1817 {
1818         struct dyn_ftrace *rec;
1819         struct ftrace_page *pg;
1820         int failed;
1821
1822         if (unlikely(ftrace_disabled))
1823                 return;
1824
1825         do_for_each_ftrace_rec(pg, rec) {
1826                 failed = __ftrace_replace_code(rec, enable);
1827                 if (failed) {
1828                         ftrace_bug(failed, rec->ip);
1829                         /* Stop processing */
1830                         return;
1831                 }
1832         } while_for_each_ftrace_rec();
1833 }
1834
1835 struct ftrace_rec_iter {
1836         struct ftrace_page      *pg;
1837         int                     index;
1838 };
1839
1840 /**
1841  * ftrace_rec_iter_start, start up iterating over traced functions
1842  *
1843  * Returns an iterator handle that is used to iterate over all
1844  * the records that represent address locations where functions
1845  * are traced.
1846  *
1847  * May return NULL if no records are available.
1848  */
1849 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1850 {
1851         /*
1852          * We only use a single iterator.
1853          * Protected by the ftrace_lock mutex.
1854          */
1855         static struct ftrace_rec_iter ftrace_rec_iter;
1856         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1857
1858         iter->pg = ftrace_pages_start;
1859         iter->index = 0;
1860
1861         /* Could have empty pages */
1862         while (iter->pg && !iter->pg->index)
1863                 iter->pg = iter->pg->next;
1864
1865         if (!iter->pg)
1866                 return NULL;
1867
1868         return iter;
1869 }
1870
1871 /**
1872  * ftrace_rec_iter_next, get the next record to process.
1873  * @iter: The handle to the iterator.
1874  *
1875  * Returns the next iterator after the given iterator @iter.
1876  */
1877 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1878 {
1879         iter->index++;
1880
1881         if (iter->index >= iter->pg->index) {
1882                 iter->pg = iter->pg->next;
1883                 iter->index = 0;
1884
1885                 /* Could have empty pages */
1886                 while (iter->pg && !iter->pg->index)
1887                         iter->pg = iter->pg->next;
1888         }
1889
1890         if (!iter->pg)
1891                 return NULL;
1892
1893         return iter;
1894 }
1895
1896 /**
1897  * ftrace_rec_iter_record, get the record at the iterator location
1898  * @iter: The current iterator location
1899  *
1900  * Returns the record that the current @iter is at.
1901  */
1902 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1903 {
1904         return &iter->pg->records[iter->index];
1905 }
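
/*
 * The three iterator helpers above are meant to be used together,
 * roughly like this (a sketch; what is done with each record is up
 * to the arch code):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... inspect or modify the site at rec->ip ...
 *	}
 */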
1906
1907 static int
1908 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1909 {
1910         unsigned long ip;
1911         int ret;
1912
1913         ip = rec->ip;
1914
1915         if (unlikely(ftrace_disabled))
1916                 return 0;
1917
1918         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1919         if (ret) {
1920                 ftrace_bug(ret, ip);
1921                 return 0;
1922         }
1923         return 1;
1924 }
1925
1926 /*
1927  * archs can override this function if they must do something
1928  * before the modifying code is performed.
1929  */
1930 int __weak ftrace_arch_code_modify_prepare(void)
1931 {
1932         return 0;
1933 }
1934
1935 /*
1936  * archs can override this function if they must do something
1937  * after the modifying code is performed.
1938  */
1939 int __weak ftrace_arch_code_modify_post_process(void)
1940 {
1941         return 0;
1942 }
1943
1944 void ftrace_modify_all_code(int command)
1945 {
1946         if (command & FTRACE_UPDATE_CALLS)
1947                 ftrace_replace_code(1);
1948         else if (command & FTRACE_DISABLE_CALLS)
1949                 ftrace_replace_code(0);
1950
1951         if (command & FTRACE_UPDATE_TRACE_FUNC)
1952                 ftrace_update_ftrace_func(ftrace_trace_function);
1953
1954         if (command & FTRACE_START_FUNC_RET)
1955                 ftrace_enable_ftrace_graph_caller();
1956         else if (command & FTRACE_STOP_FUNC_RET)
1957                 ftrace_disable_ftrace_graph_caller();
1958 }
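
/*
 * Callers build the command from the FTRACE_* bits tested above. For
 * example (illustrative only), a caller that wants the call sites
 * patched and the trace function switched in one pass would use:
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */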
1959
1960 static int __ftrace_modify_code(void *data)
1961 {
1962         int *command = data;
1963
1964         ftrace_modify_all_code(*command);
1965
1966         return 0;
1967 }
1968
1969 /**
1970  * ftrace_run_stop_machine, go back to the stop machine method
1971  * @command: The command to tell ftrace what to do
1972  *
1973  * If an arch needs to fall back to the stop machine method, then
1974  * it can call this function.
1975  */
1976 void ftrace_run_stop_machine(int command)
1977 {
1978         stop_machine(__ftrace_modify_code, &command, NULL);
1979 }
1980
1981 /**
1982  * arch_ftrace_update_code, modify the code to trace or not trace
1983  * @command: The command that needs to be done
1984  *
1985  * Archs can override this function if they do not need to
1986  * run stop_machine() to modify code.
1987  */
1988 void __weak arch_ftrace_update_code(int command)
1989 {
1990         ftrace_run_stop_machine(command);
1991 }
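
/*
 * A hypothetical sketch of such an override, for an arch that can
 * patch text safely while the system runs (the prepare/finish helpers
 * are placeholders, not real kernel APIs):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		my_text_poke_prepare();
 *		ftrace_modify_all_code(command);
 *		my_text_poke_finish();
 *	}
 */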
1992
1993 static void ftrace_run_update_code(int command)
1994 {
1995         int ret;
1996
1997         ret = ftrace_arch_code_modify_prepare();
1998         FTRACE_WARN_ON(ret);
1999         if (ret)
2000                 return;
2001         /*
2002          * Do not call the function tracer while we update the code.
2003          * We are in stop machine.
2004          */
2005         function_trace_stop++;
2006
2007         /*
2008          * By default we use stop_machine() to modify the code.
2009          * But archs can do whatever they want as long as it
2010          * is safe. The stop_machine() is the safest, but also
2011          * produces the most overhead.
2012          */
2013         arch_ftrace_update_code(command);
2014
2015         function_trace_stop--;
2016
2017         ret = ftrace_arch_code_modify_post_process();
2018         FTRACE_WARN_ON(ret);
2019 }
2020
2021 static ftrace_func_t saved_ftrace_func;
2022 static int ftrace_start_up;
2023 static int global_start_up;
2024
2025 static void ftrace_startup_enable(int command)
2026 {
2027         if (saved_ftrace_func != ftrace_trace_function) {
2028                 saved_ftrace_func = ftrace_trace_function;
2029                 command |= FTRACE_UPDATE_TRACE_FUNC;
2030         }
2031
2032         if (!command || !ftrace_enabled)
2033                 return;
2034
2035         ftrace_run_update_code(command);
2036 }
2037
2038 static int ftrace_startup(struct ftrace_ops *ops, int command)
2039 {
2040         bool hash_enable = true;
2041
2042         if (unlikely(ftrace_disabled))
2043                 return -ENODEV;
2044
2045         ftrace_start_up++;
2046         command |= FTRACE_UPDATE_CALLS;
2047
2048         /* ops marked global share the filter hashes */
2049         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2050                 ops = &global_ops;
2051                 /* Don't update hash if global is already set */
2052                 if (global_start_up)
2053                         hash_enable = false;
2054                 global_start_up++;
2055         }
2056
2057         ops->flags |= FTRACE_OPS_FL_ENABLED;
2058         if (hash_enable)
2059                 ftrace_hash_rec_enable(ops, 1);
2060
2061         ftrace_startup_enable(command);
2062
2063         return 0;
2064 }
2065
2066 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2067 {
2068         bool hash_disable = true;
2069
2070         if (unlikely(ftrace_disabled))
2071                 return;
2072
2073         ftrace_start_up--;
2074         /*
2075          * Just warn in case of imbalance; no need to kill ftrace, it's not
2076          * critical, but the ftrace_call callers may never be nopped again after
2077          * further ftrace uses.
2078          */
2079         WARN_ON_ONCE(ftrace_start_up < 0);
2080
2081         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2082                 ops = &global_ops;
2083                 global_start_up--;
2084                 WARN_ON_ONCE(global_start_up < 0);
2085                 /* Don't update hash if global still has users */
2086                 if (global_start_up) {
2087                         WARN_ON_ONCE(!ftrace_start_up);
2088                         hash_disable = false;
2089                 }
2090         }
2091
2092         if (hash_disable)
2093                 ftrace_hash_rec_disable(ops, 1);
2094
2095         if (ops != &global_ops || !global_start_up)
2096                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2097
2098         command |= FTRACE_UPDATE_CALLS;
2099
2100         if (saved_ftrace_func != ftrace_trace_function) {
2101                 saved_ftrace_func = ftrace_trace_function;
2102                 command |= FTRACE_UPDATE_TRACE_FUNC;
2103         }
2104
2105         if (!command || !ftrace_enabled)
2106                 return;
2107
2108         ftrace_run_update_code(command);
2109 }
2110
2111 static void ftrace_startup_sysctl(void)
2112 {
2113         if (unlikely(ftrace_disabled))
2114                 return;
2115
2116         /* Force update next time */
2117         saved_ftrace_func = NULL;
2118         /* ftrace_start_up is true if we want ftrace running */
2119         if (ftrace_start_up)
2120                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2121 }
2122
2123 static void ftrace_shutdown_sysctl(void)
2124 {
2125         if (unlikely(ftrace_disabled))
2126                 return;
2127
2128         /* ftrace_start_up is true if ftrace is running */
2129         if (ftrace_start_up)
2130                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2131 }
2132
2133 static cycle_t          ftrace_update_time;
2134 static unsigned long    ftrace_update_cnt;
2135 unsigned long           ftrace_update_tot_cnt;
2136
2137 static int ops_traces_mod(struct ftrace_ops *ops)
2138 {
2139         struct ftrace_hash *hash;
2140
2141         hash = ops->filter_hash;
2142         return ftrace_hash_empty(hash);
2143 }
2144
2145 static int ftrace_update_code(struct module *mod)
2146 {
2147         struct ftrace_page *pg;
2148         struct dyn_ftrace *p;
2149         cycle_t start, stop;
2150         unsigned long ref = 0;
2151         int i;
2152
2153         /*
2154          * When adding a module, we need to check if tracers are
2155          * currently enabled and if they are set to trace all functions.
2156          * If they are, we need to enable the module functions as well
2157          * as update the reference counts for those function records.
2158          */
2159         if (mod) {
2160                 struct ftrace_ops *ops;
2161
2162                 for (ops = ftrace_ops_list;
2163                      ops != &ftrace_list_end; ops = ops->next) {
2164                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2165                             ops_traces_mod(ops))
2166                                 ref++;
2167                 }
2168         }
2169
2170         start = ftrace_now(raw_smp_processor_id());
2171         ftrace_update_cnt = 0;
2172
2173         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2174
2175                 for (i = 0; i < pg->index; i++) {
2176                         /* If something went wrong, bail without enabling anything */
2177                         if (unlikely(ftrace_disabled))
2178                                 return -1;
2179
2180                         p = &pg->records[i];
2181                         p->flags = ref;
2182
2183                         /*
2184                          * Do the initial record conversion from mcount jump
2185                          * to the NOP instructions.
2186                          */
2187                         if (!ftrace_code_disable(mod, p))
2188                                 break;
2189
2190                         ftrace_update_cnt++;
2191
2192                         /*
2193                          * If the tracing is enabled, go ahead and enable the record.
2194                          *
2195          * The reason not to enable the record immediately is the
2196          * inherent check of ftrace_make_nop/ftrace_make_call for
2197          * correct previous instructions.  Doing the NOP
2198          * conversion first puts the module into the correct state, thus
2199          * passing the ftrace_make_call check.
2200                          */
2201                         if (ftrace_start_up && ref) {
2202                                 int failed = __ftrace_replace_code(p, 1);
2203                                 if (failed)
2204                                         ftrace_bug(failed, p->ip);
2205                         }
2206                 }
2207         }
2208
2209         ftrace_new_pgs = NULL;
2210
2211         stop = ftrace_now(raw_smp_processor_id());
2212         ftrace_update_time = stop - start;
2213         ftrace_update_tot_cnt += ftrace_update_cnt;
2214
2215         return 0;
2216 }
2217
2218 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2219 {
2220         int order;
2221         int cnt;
2222
2223         if (WARN_ON(!count))
2224                 return -EINVAL;
2225
2226         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2227
2228         /*
2229          * We want to fill as much as possible. No more than a page
2230          * may be empty.
2231          */
2232         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2233                 order--;
2234
2235  again:
2236         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2237
2238         if (!pg->records) {
2239                 /* if we can't allocate this size, try something smaller */
2240                 if (!order)
2241                         return -ENOMEM;
2242                 order >>= 1;
2243                 goto again;
2244         }
2245
2246         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2247         pg->size = cnt;
2248
2249         if (cnt > count)
2250                 cnt = count;
2251
2252         return cnt;
2253 }
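
/*
 * A worked example, assuming 4K pages and 128 records per page: for
 * count == 200, DIV_ROUND_UP gives 2 pages, so order starts at 1
 * (256 records). The loop above keeps order 1, since 256 is less
 * than 200 + 128, and the final cnt of 256 is capped back to 200.
 */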
2254
2255 static struct ftrace_page *
2256 ftrace_allocate_pages(unsigned long num_to_init)
2257 {
2258         struct ftrace_page *start_pg;
2259         struct ftrace_page *pg;
2260         int order;
2261         int cnt;
2262
2263         if (!num_to_init)
2264                 return NULL;
2265
2266         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2267         if (!pg)
2268                 return NULL;
2269
2270         /*
2271          * Try to allocate as much as possible in one contiguous
2272          * location that fills in all of the space. We want to
2273          * waste as little space as possible.
2274          */
2275         for (;;) {
2276                 cnt = ftrace_allocate_records(pg, num_to_init);
2277                 if (cnt < 0)
2278                         goto free_pages;
2279
2280                 num_to_init -= cnt;
2281                 if (!num_to_init)
2282                         break;
2283
2284                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2285                 if (!pg->next)
2286                         goto free_pages;
2287
2288                 pg = pg->next;
2289         }
2290
2291         return start_pg;
2292
2293  free_pages:
2294         while (start_pg) {
2295                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2296                 free_pages((unsigned long)pg->records, order);
2297                 start_pg = pg->next;
2298                 kfree(pg);
2299                 pg = start_pg;
2300         }
2301         pr_info("ftrace: FAILED to allocate memory for functions\n");
2302         return NULL;
2303 }
2304
2305 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2306 {
2307         int cnt;
2308
2309         if (!num_to_init) {
2310                 pr_info("ftrace: No functions to be traced?\n");
2311                 return -1;
2312         }
2313
2314         cnt = num_to_init / ENTRIES_PER_PAGE;
2315         pr_info("ftrace: allocating %ld entries in %d pages\n",
2316                 num_to_init, cnt + 1);
2317
2318         return 0;
2319 }
2320
2321 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2322
2323 struct ftrace_iterator {
2324         loff_t                          pos;
2325         loff_t                          func_pos;
2326         struct ftrace_page              *pg;
2327         struct dyn_ftrace               *func;
2328         struct ftrace_func_probe        *probe;
2329         struct trace_parser             parser;
2330         struct ftrace_hash              *hash;
2331         struct ftrace_ops               *ops;
2332         int                             hidx;
2333         int                             idx;
2334         unsigned                        flags;
2335 };
2336
2337 static void *
2338 t_hash_next(struct seq_file *m, loff_t *pos)
2339 {
2340         struct ftrace_iterator *iter = m->private;
2341         struct hlist_node *hnd = NULL;
2342         struct hlist_head *hhd;
2343
2344         (*pos)++;
2345         iter->pos = *pos;
2346
2347         if (iter->probe)
2348                 hnd = &iter->probe->node;
2349  retry:
2350         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2351                 return NULL;
2352
2353         hhd = &ftrace_func_hash[iter->hidx];
2354
2355         if (hlist_empty(hhd)) {
2356                 iter->hidx++;
2357                 hnd = NULL;
2358                 goto retry;
2359         }
2360
2361         if (!hnd)
2362                 hnd = hhd->first;
2363         else {
2364                 hnd = hnd->next;
2365                 if (!hnd) {
2366                         iter->hidx++;
2367                         goto retry;
2368                 }
2369         }
2370
2371         if (WARN_ON_ONCE(!hnd))
2372                 return NULL;
2373
2374         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2375
2376         return iter;
2377 }
2378
2379 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2380 {
2381         struct ftrace_iterator *iter = m->private;
2382         void *p = NULL;
2383         loff_t l;
2384
2385         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2386                 return NULL;
2387
2388         if (iter->func_pos > *pos)
2389                 return NULL;
2390
2391         iter->hidx = 0;
2392         for (l = 0; l <= (*pos - iter->func_pos); ) {
2393                 p = t_hash_next(m, &l);
2394                 if (!p)
2395                         break;
2396         }
2397         if (!p)
2398                 return NULL;
2399
2400         /* Only set this if we have an item */
2401         iter->flags |= FTRACE_ITER_HASH;
2402
2403         return iter;
2404 }
2405
2406 static int
2407 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2408 {
2409         struct ftrace_func_probe *rec;
2410
2411         rec = iter->probe;
2412         if (WARN_ON_ONCE(!rec))
2413                 return -EIO;
2414
2415         if (rec->ops->print)
2416                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2417
2418         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2419
2420         if (rec->data)
2421                 seq_printf(m, ":%p", rec->data);
2422         seq_putc(m, '\n');
2423
2424         return 0;
2425 }
2426
2427 static void *
2428 t_next(struct seq_file *m, void *v, loff_t *pos)
2429 {
2430         struct ftrace_iterator *iter = m->private;
2431         struct ftrace_ops *ops = iter->ops;
2432         struct dyn_ftrace *rec = NULL;
2433
2434         if (unlikely(ftrace_disabled))
2435                 return NULL;
2436
2437         if (iter->flags & FTRACE_ITER_HASH)
2438                 return t_hash_next(m, pos);
2439
2440         (*pos)++;
2441         iter->pos = iter->func_pos = *pos;
2442
2443         if (iter->flags & FTRACE_ITER_PRINTALL)
2444                 return t_hash_start(m, pos);
2445
2446  retry:
2447         if (iter->idx >= iter->pg->index) {
2448                 if (iter->pg->next) {
2449                         iter->pg = iter->pg->next;
2450                         iter->idx = 0;
2451                         goto retry;
2452                 }
2453         } else {
2454                 rec = &iter->pg->records[iter->idx++];
2455                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2456                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2457
2458                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2459                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2460
2461                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2462                      !(rec->flags & FTRACE_FL_ENABLED))) {
2463
2464                         rec = NULL;
2465                         goto retry;
2466                 }
2467         }
2468
2469         if (!rec)
2470                 return t_hash_start(m, pos);
2471
2472         iter->func = rec;
2473
2474         return iter;
2475 }
2476
2477 static void reset_iter_read(struct ftrace_iterator *iter)
2478 {
2479         iter->pos = 0;
2480         iter->func_pos = 0;
2481         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2482 }
2483
2484 static void *t_start(struct seq_file *m, loff_t *pos)
2485 {
2486         struct ftrace_iterator *iter = m->private;
2487         struct ftrace_ops *ops = iter->ops;
2488         void *p = NULL;
2489         loff_t l;
2490
2491         mutex_lock(&ftrace_lock);
2492
2493         if (unlikely(ftrace_disabled))
2494                 return NULL;
2495
2496         /*
2497          * If an lseek was done, then reset and start from the beginning.
2498          */
2499         if (*pos < iter->pos)
2500                 reset_iter_read(iter);
2501
2502         /*
2503          * For set_ftrace_filter reading, if we have the filter
2504          * off, we can short-circuit and just print out that all
2505          * functions are enabled.
2506          */
2507         if (iter->flags & FTRACE_ITER_FILTER &&
2508             ftrace_hash_empty(ops->filter_hash)) {
2509                 if (*pos > 0)
2510                         return t_hash_start(m, pos);
2511                 iter->flags |= FTRACE_ITER_PRINTALL;
2512                 /* reset in case of seek/pread */
2513                 iter->flags &= ~FTRACE_ITER_HASH;
2514                 return iter;
2515         }
2516
2517         if (iter->flags & FTRACE_ITER_HASH)
2518                 return t_hash_start(m, pos);
2519
2520         /*
2521          * Unfortunately, we need to restart at ftrace_pages_start
2522          * every time we let go of ftrace_lock. This is because
2523          * those pointers can change without the lock.
2524          */
2525         iter->pg = ftrace_pages_start;
2526         iter->idx = 0;
2527         for (l = 0; l <= *pos; ) {
2528                 p = t_next(m, p, &l);
2529                 if (!p)
2530                         break;
2531         }
2532
2533         if (!p)
2534                 return t_hash_start(m, pos);
2535
2536         return iter;
2537 }
2538
2539 static void t_stop(struct seq_file *m, void *p)
2540 {
2541         mutex_unlock(&ftrace_lock);
2542 }
2543
2544 static int t_show(struct seq_file *m, void *v)
2545 {
2546         struct ftrace_iterator *iter = m->private;
2547         struct dyn_ftrace *rec;
2548
2549         if (iter->flags & FTRACE_ITER_HASH)
2550                 return t_hash_show(m, iter);
2551
2552         if (iter->flags & FTRACE_ITER_PRINTALL) {
2553                 seq_printf(m, "#### all functions enabled ####\n");
2554                 return 0;
2555         }
2556
2557         rec = iter->func;
2558
2559         if (!rec)
2560                 return 0;
2561
2562         seq_printf(m, "%ps", (void *)rec->ip);
2563         if (iter->flags & FTRACE_ITER_ENABLED)
2564                 seq_printf(m, " (%ld)%s",
2565                            rec->flags & ~FTRACE_FL_MASK,
2566                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2567         seq_printf(m, "\n");
2568
2569         return 0;
2570 }
2571
2572 static const struct seq_operations show_ftrace_seq_ops = {
2573         .start = t_start,
2574         .next = t_next,
2575         .stop = t_stop,
2576         .show = t_show,
2577 };
2578
2579 static int
2580 ftrace_avail_open(struct inode *inode, struct file *file)
2581 {
2582         struct ftrace_iterator *iter;
2583
2584         if (unlikely(ftrace_disabled))
2585                 return -ENODEV;
2586
2587         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2588         if (iter) {
2589                 iter->pg = ftrace_pages_start;
2590                 iter->ops = &global_ops;
2591         }
2592
2593         return iter ? 0 : -ENOMEM;
2594 }
2595
2596 static int
2597 ftrace_enabled_open(struct inode *inode, struct file *file)
2598 {
2599         struct ftrace_iterator *iter;
2600
2601         if (unlikely(ftrace_disabled))
2602                 return -ENODEV;
2603
2604         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2605         if (iter) {
2606                 iter->pg = ftrace_pages_start;
2607                 iter->flags = FTRACE_ITER_ENABLED;
2608                 iter->ops = &global_ops;
2609         }
2610
2611         return iter ? 0 : -ENOMEM;
2612 }
2613
2614 static void ftrace_filter_reset(struct ftrace_hash *hash)
2615 {
2616         mutex_lock(&ftrace_lock);
2617         ftrace_hash_clear(hash);
2618         mutex_unlock(&ftrace_lock);
2619 }
2620
2621 /**
2622  * ftrace_regex_open - initialize function tracer filter files
2623  * @ops: The ftrace_ops that hold the hash filters
2624  * @flag: The type of filter to process
2625  * @inode: The inode, usually passed in to your open routine
2626  * @file: The file, usually passed in to your open routine
2627  *
2628  * ftrace_regex_open() initializes the filter files for the
2629  * @ops. Depending on @flag it may process the filter hash or
2630  * the notrace hash of @ops. With this called from the open
2631  * routine, you can use ftrace_filter_write() for the write
2632  * routine if @flag has FTRACE_ITER_FILTER set, or
2633  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2634  * ftrace_filter_lseek() should be used as the lseek routine, and
2635  * release must call ftrace_regex_release().
2636  */
2637 int
2638 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2639                   struct inode *inode, struct file *file)
2640 {
2641         struct ftrace_iterator *iter;
2642         struct ftrace_hash *hash;
2643         int ret = 0;
2644
2645         ftrace_ops_init(ops);
2646
2647         if (unlikely(ftrace_disabled))
2648                 return -ENODEV;
2649
2650         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2651         if (!iter)
2652                 return -ENOMEM;
2653
2654         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2655                 kfree(iter);
2656                 return -ENOMEM;
2657         }
2658
2659         iter->ops = ops;
2660         iter->flags = flag;
2661
2662         mutex_lock(&ops->regex_lock);
2663
2664         if (flag & FTRACE_ITER_NOTRACE)
2665                 hash = ops->notrace_hash;
2666         else
2667                 hash = ops->filter_hash;
2668
2669         if (file->f_mode & FMODE_WRITE) {
2670                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2671                 if (!iter->hash) {
2672                         trace_parser_put(&iter->parser);
2673                         kfree(iter);
2674                         ret = -ENOMEM;
2675                         goto out_unlock;
2676                 }
2677         }
2678
2679         if ((file->f_mode & FMODE_WRITE) &&
2680             (file->f_flags & O_TRUNC))
2681                 ftrace_filter_reset(iter->hash);
2682
2683         if (file->f_mode & FMODE_READ) {
2684                 iter->pg = ftrace_pages_start;
2685
2686                 ret = seq_open(file, &show_ftrace_seq_ops);
2687                 if (!ret) {
2688                         struct seq_file *m = file->private_data;
2689                         m->private = iter;
2690                 } else {
2691                         /* Failed */
2692                         free_ftrace_hash(iter->hash);
2693                         trace_parser_put(&iter->parser);
2694                         kfree(iter);
2695                 }
2696         } else
2697                 file->private_data = iter;
2698
2699  out_unlock:
2700         mutex_unlock(&ops->regex_lock);
2701
2702         return ret;
2703 }
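
/*
 * Wiring the kerneldoc above into a file looks roughly like this
 * sketch (my_filter_fops and my_filter_open are placeholders; the
 * open callback is assumed to call ftrace_regex_open() with
 * FTRACE_ITER_FILTER set):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = my_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = ftrace_filter_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */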
2704
2705 static int
2706 ftrace_filter_open(struct inode *inode, struct file *file)
2707 {
2708         return ftrace_regex_open(&global_ops,
2709                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2710                         inode, file);
2711 }
2712
2713 static int
2714 ftrace_notrace_open(struct inode *inode, struct file *file)
2715 {
2716         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2717                                  inode, file);
2718 }
2719
2720 static int ftrace_match(char *str, char *regex, int len, int type)
2721 {
2722         int matched = 0;
2723         int slen;
2724
2725         switch (type) {
2726         case MATCH_FULL:
2727                 if (strcmp(str, regex) == 0)
2728                         matched = 1;
2729                 break;
2730         case MATCH_FRONT_ONLY:
2731                 if (strncmp(str, regex, len) == 0)
2732                         matched = 1;
2733                 break;
2734         case MATCH_MIDDLE_ONLY:
2735                 if (strstr(str, regex))
2736                         matched = 1;
2737                 break;
2738         case MATCH_END_ONLY:
2739                 slen = strlen(str);
2740                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2741                         matched = 1;
2742                 break;
2743         }
2744
2745         return matched;
2746 }
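
/*
 * The type passed in comes from filter_parse_regex(); as a rule of
 * thumb, "func" yields MATCH_FULL, "func*" MATCH_FRONT_ONLY,
 * "*func" MATCH_END_ONLY and "*func*" MATCH_MIDDLE_ONLY.
 */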
2747
2748 static int
2749 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2750 {
2751         struct ftrace_func_entry *entry;
2752         int ret = 0;
2753
2754         entry = ftrace_lookup_ip(hash, rec->ip);
2755         if (not) {
2756                 /* Do nothing if it doesn't exist */
2757                 if (!entry)
2758                         return 0;
2759
2760                 free_hash_entry(hash, entry);
2761         } else {
2762                 /* Do nothing if it exists */
2763                 if (entry)
2764                         return 0;
2765
2766                 ret = add_hash_entry(hash, rec->ip);
2767         }
2768         return ret;
2769 }
2770
2771 static int
2772 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2773                     char *regex, int len, int type)
2774 {
2775         char str[KSYM_SYMBOL_LEN];
2776         char *modname;
2777
2778         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2779
2780         if (mod) {
2781                 /* module lookup requires matching the module */
2782                 if (!modname || strcmp(modname, mod))
2783                         return 0;
2784
2785                 /* blank search means to match all funcs in the mod */
2786                 if (!len)
2787                         return 1;
2788         }
2789
2790         return ftrace_match(str, regex, len, type);
2791 }
2792
2793 static int
2794 match_records(struct ftrace_hash *hash, char *buff,
2795               int len, char *mod, int not)
2796 {
2797         unsigned search_len = 0;
2798         struct ftrace_page *pg;
2799         struct dyn_ftrace *rec;
2800         int type = MATCH_FULL;
2801         char *search = buff;
2802         int found = 0;
2803         int ret;
2804
2805         if (len) {
2806                 type = filter_parse_regex(buff, len, &search, &not);
2807                 search_len = strlen(search);
2808         }
2809
2810         mutex_lock(&ftrace_lock);
2811
2812         if (unlikely(ftrace_disabled))
2813                 goto out_unlock;
2814
2815         do_for_each_ftrace_rec(pg, rec) {
2816                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2817                         ret = enter_record(hash, rec, not);
2818                         if (ret < 0) {
2819                                 found = ret;
2820                                 goto out_unlock;
2821                         }
2822                         found = 1;
2823                 }
2824         } while_for_each_ftrace_rec();
2825  out_unlock:
2826         mutex_unlock(&ftrace_lock);
2827
2828         return found;
2829 }
2830
2831 static int
2832 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2833 {
2834         return match_records(hash, buff, len, NULL, 0);
2835 }
2836
2837 static int
2838 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2839 {
2840         int not = 0;
2841
2842         /* blank or '*' mean the same */
2843         if (strcmp(buff, "*") == 0)
2844                 buff[0] = 0;
2845
2846         /* handle the case of 'dont filter this module' */
2847         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2848                 buff[0] = 0;
2849                 not = 1;
2850         }
2851
2852         return match_records(hash, buff, strlen(buff), mod, not);
2853 }
2854
2855 /*
2856  * We register the module command as a template to show others how
2857  * to register a command as well.
2858  */
2859
2860 static int
2861 ftrace_mod_callback(struct ftrace_hash *hash,
2862                     char *func, char *cmd, char *param, int enable)
2863 {
2864         char *mod;
2865         int ret = -EINVAL;
2866
2867         /*
2868          * cmd == 'mod' because we only registered this func
2869          * for the 'mod' ftrace_func_command.
2870          * But if you register one func with multiple commands,
2871          * you can tell which command was used by the cmd
2872          * parameter.
2873          */
2874
2875         /* we must have a module name */
2876         if (!param)
2877                 return ret;
2878
2879         mod = strsep(&param, ":");
2880         if (!strlen(mod))
2881                 return ret;
2882
2883         ret = ftrace_match_module_records(hash, func, mod);
2884         if (!ret)
2885                 ret = -EINVAL;
2886         if (ret < 0)
2887                 return ret;
2888
2889         return 0;
2890 }
2891
2892 static struct ftrace_func_command ftrace_mod_cmd = {
2893         .name                   = "mod",
2894         .func                   = ftrace_mod_callback,
2895 };
2896
2897 static int __init ftrace_mod_cmd_init(void)
2898 {
2899         return register_ftrace_command(&ftrace_mod_cmd);
2900 }
2901 core_initcall(ftrace_mod_cmd_init);
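
/*
 * With the command registered, the filter files accept it; e.g.
 * (the module name is purely illustrative):
 *
 *	echo '*:mod:ext4' > set_ftrace_filter
 *
 * lands in ftrace_mod_callback() with func == "*", cmd == "mod" and
 * param == "ext4".
 */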
2902
2903 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2904                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2905 {
2906         struct ftrace_func_probe *entry;
2907         struct hlist_head *hhd;
2908         unsigned long key;
2909
2910         key = hash_long(ip, FTRACE_HASH_BITS);
2911
2912         hhd = &ftrace_func_hash[key];
2913
2914         if (hlist_empty(hhd))
2915                 return;
2916
2917         /*
2918          * Disable preemption for these calls to prevent an RCU grace
2919          * period. This syncs the hash iteration and freeing of items
2920          * on the hash. rcu_read_lock is too dangerous here.
2921          */
2922         preempt_disable_notrace();
2923         hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2924                 if (entry->ip == ip)
2925                         entry->ops->func(ip, parent_ip, &entry->data);
2926         }
2927         preempt_enable_notrace();
2928 }
2929
2930 static struct ftrace_ops trace_probe_ops __read_mostly =
2931 {
2932         .func           = function_trace_probe_call,
2933         .flags          = FTRACE_OPS_FL_INITIALIZED,
2934         INIT_REGEX_LOCK(trace_probe_ops)
2935 };
2936
2937 static int ftrace_probe_registered;
2938
2939 static void __enable_ftrace_function_probe(void)
2940 {
2941         int ret;
2942         int i;
2943
2944         if (ftrace_probe_registered) {
2945                 /* still need to update the function call sites */
2946                 if (ftrace_enabled)
2947                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2948                 return;
2949         }
2950
2951         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2952                 struct hlist_head *hhd = &ftrace_func_hash[i];
2953                 if (hhd->first)
2954                         break;
2955         }
2956         /* Nothing registered? */
2957         if (i == FTRACE_FUNC_HASHSIZE)
2958                 return;
2959
2960         ret = __register_ftrace_function(&trace_probe_ops);
2961         if (!ret)
2962                 ret = ftrace_startup(&trace_probe_ops, 0);
2963
2964         ftrace_probe_registered = 1;
2965 }
2966
2967 static void __disable_ftrace_function_probe(void)
2968 {
2969         int ret;
2970         int i;
2971
2972         if (!ftrace_probe_registered)
2973                 return;
2974
2975         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2976                 struct hlist_head *hhd = &ftrace_func_hash[i];
2977                 if (hhd->first)
2978                         return;
2979         }
2980
2981         /* no more funcs left */
2982         ret = __unregister_ftrace_function(&trace_probe_ops);
2983         if (!ret)
2984                 ftrace_shutdown(&trace_probe_ops, 0);
2985
2986         ftrace_probe_registered = 0;
2987 }
2988
2989
2990 static void ftrace_free_entry(struct ftrace_func_probe *entry)
2991 {
2992         if (entry->ops->free)
2993                 entry->ops->free(entry->ops, entry->ip, &entry->data);
2994         kfree(entry);
2995 }
2996
2997 int
2998 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2999                               void *data)
3000 {
3001         struct ftrace_func_probe *entry;
3002         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3003         struct ftrace_hash *hash;
3004         struct ftrace_page *pg;
3005         struct dyn_ftrace *rec;
3006         int type, len, not;
3007         unsigned long key;
3008         int count = 0;
3009         char *search;
3010         int ret;
3011
3012         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3013         len = strlen(search);
3014
3015         /* we do not support '!' for function probes */
3016         if (WARN_ON(not))
3017                 return -EINVAL;
3018
3019         mutex_lock(&trace_probe_ops.regex_lock);
3020
3021         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3022         if (!hash) {
3023                 count = -ENOMEM;
3024                 goto out;
3025         }
3026
3027         if (unlikely(ftrace_disabled)) {
3028                 count = -ENODEV;
3029                 goto out;
3030         }
3031
3032         mutex_lock(&ftrace_lock);
3033
3034         do_for_each_ftrace_rec(pg, rec) {
3035
3036                 if (!ftrace_match_record(rec, NULL, search, len, type))
3037                         continue;
3038
3039                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3040                 if (!entry) {
3041                         /* If we did not process any, then return error */
3042                         if (!count)
3043                                 count = -ENOMEM;
3044                         goto out_unlock;
3045                 }
3046
3047                 count++;
3048
3049                 entry->data = data;
3050
3051                 /*
3052                  * The caller might want to do something special
3053                  * for each function we find. We call the callback
3054                  * to give the caller an opportunity to do so.
3055                  */
3056                 if (ops->init) {
3057                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3058                                 /* caller does not like this func */
3059                                 kfree(entry);
3060                                 continue;
3061                         }
3062                 }
3063
3064                 ret = enter_record(hash, rec, 0);
3065                 if (ret < 0) {
3066                         kfree(entry);
3067                         count = ret;
3068                         goto out_unlock;
3069                 }
3070
3071                 entry->ops = ops;
3072                 entry->ip = rec->ip;
3073
3074                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3075                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3076
3077         } while_for_each_ftrace_rec();
3078
3079         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3080         if (ret < 0)
3081                 count = ret;
3082
3083         __enable_ftrace_function_probe();
3084
3085  out_unlock:
3086         mutex_unlock(&ftrace_lock);
3087  out:
3088         mutex_unlock(&trace_probe_ops.regex_lock);
3089         free_ftrace_hash(hash);
3090
3091         return count;
3092 }
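
/*
 * A minimal user of this interface looks roughly like the following
 * sketch (my_probe_ops and my_probe_func are placeholders):
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * my_probe_func() then runs from function_trace_probe_call() whenever
 * a function matching "vfs_*" is hit.
 */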
3093
3094 enum {
3095         PROBE_TEST_FUNC         = 1,
3096         PROBE_TEST_DATA         = 2
3097 };
3098
3099 static void
3100 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3101                                   void *data, int flags)
3102 {
3103         struct ftrace_func_entry *rec_entry;
3104         struct ftrace_func_probe *entry;
3105         struct ftrace_func_probe *p;
3106         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3107         struct list_head free_list;
3108         struct ftrace_hash *hash;
3109         struct hlist_node *tmp;
3110         char str[KSYM_SYMBOL_LEN];
3111         int type = MATCH_FULL;
3112         int i, len = 0;
3113         char *search;
3114
3115         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3116                 glob = NULL;
3117         else if (glob) {
3118                 int not;
3119
3120                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3121                 len = strlen(search);
3122
3123                 /* we do not support '!' for function probes */
3124                 if (WARN_ON(not))
3125                         return;
3126         }
3127
3128         mutex_lock(&trace_probe_ops.regex_lock);
3129
3130         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3131         if (!hash)
3132                 /* Hmm, should report this somehow */
3133                 goto out_unlock;
3134
3135         INIT_LIST_HEAD(&free_list);
3136
3137         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3138                 struct hlist_head *hhd = &ftrace_func_hash[i];
3139
3140                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3141
3142                         /* break up if statements for readability */
3143                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3144                                 continue;
3145
3146                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3147                                 continue;
3148
3149                         /* do this last, since it is the most expensive */
3150                         if (glob) {
3151                                 kallsyms_lookup(entry->ip, NULL, NULL,
3152                                                 NULL, str);
3153                                 if (!ftrace_match(str, glob, len, type))
3154                                         continue;
3155                         }
3156
3157                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3158                         /* It is possible more than one entry had this ip */
3159                         if (rec_entry)
3160                                 free_hash_entry(hash, rec_entry);
3161
3162                         hlist_del_rcu(&entry->node);
3163                         list_add(&entry->free_list, &free_list);
3164                 }
3165         }
3166         mutex_lock(&ftrace_lock);
3167         __disable_ftrace_function_probe();
3168         /*
3169          * Remove after the disable is called. Otherwise, if the last
3170          * probe is removed, a null hash means *all enabled*.
3171          */
3172         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3173         synchronize_sched();
3174         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3175                 list_del(&entry->free_list);
3176                 ftrace_free_entry(entry);
3177         }
3178         mutex_unlock(&ftrace_lock);
3179
3180  out_unlock:
3181         mutex_unlock(&trace_probe_ops.regex_lock);
3182         free_ftrace_hash(hash);
3183 }
3184
3185 void
3186 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3187                                 void *data)
3188 {
3189         __unregister_ftrace_function_probe(glob, ops, data,
3190                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3191 }
3192
3193 void
3194 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3195 {
3196         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3197 }
3198
3199 void unregister_ftrace_function_probe_all(char *glob)
3200 {
3201         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3202 }
3203
3204 static LIST_HEAD(ftrace_commands);
3205 static DEFINE_MUTEX(ftrace_cmd_mutex);
3206
3207 int register_ftrace_command(struct ftrace_func_command *cmd)
3208 {
3209         struct ftrace_func_command *p;
3210         int ret = 0;
3211
3212         mutex_lock(&ftrace_cmd_mutex);
3213         list_for_each_entry(p, &ftrace_commands, list) {
3214                 if (strcmp(cmd->name, p->name) == 0) {
3215                         ret = -EBUSY;
3216                         goto out_unlock;
3217                 }
3218         }
3219         list_add(&cmd->list, &ftrace_commands);
3220  out_unlock:
3221         mutex_unlock(&ftrace_cmd_mutex);
3222
3223         return ret;
3224 }
3225
3226 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3227 {
3228         struct ftrace_func_command *p, *n;
3229         int ret = -ENODEV;
3230
3231         mutex_lock(&ftrace_cmd_mutex);
3232         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3233                 if (strcmp(cmd->name, p->name) == 0) {
3234                         ret = 0;
3235                         list_del_init(&p->list);
3236                         goto out_unlock;
3237                 }
3238         }
3239  out_unlock:
3240         mutex_unlock(&ftrace_cmd_mutex);
3241
3242         return ret;
3243 }
3244
3245 static int ftrace_process_regex(struct ftrace_hash *hash,
3246                                 char *buff, int len, int enable)
3247 {
3248         char *func, *command, *next = buff;
3249         struct ftrace_func_command *p;
3250         int ret = -EINVAL;
3251
3252         func = strsep(&next, ":");
3253
3254         if (!next) {
3255                 ret = ftrace_match_records(hash, func, len);
3256                 if (!ret)
3257                         ret = -EINVAL;
3258                 if (ret < 0)
3259                         return ret;
3260                 return 0;
3261         }
3262
3263         /* command found */
3264
3265         command = strsep(&next, ":");
3266
3267         mutex_lock(&ftrace_cmd_mutex);
3268         list_for_each_entry(p, &ftrace_commands, list) {
3269                 if (strcmp(p->name, command) == 0) {
3270                         ret = p->func(hash, func, command, next, enable);
3271                         goto out_unlock;
3272                 }
3273         }
3274  out_unlock:
3275         mutex_unlock(&ftrace_cmd_mutex);
3276
3277         return ret;
3278 }
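
/*
 * For example, a write of "foo:mod:bar" is split here into
 * func == "foo", command == "mod" and next == "bar" before being
 * dispatched to the registered command (names purely illustrative).
 */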
3279
3280 static ssize_t
3281 ftrace_regex_write(struct file *file, const char __user *ubuf,
3282                    size_t cnt, loff_t *ppos, int enable)
3283 {
3284         struct ftrace_iterator *iter;
3285         struct trace_parser *parser;
3286         ssize_t ret, read;
3287
3288         if (!cnt)
3289                 return 0;
3290
3291         if (file->f_mode & FMODE_READ) {
3292                 struct seq_file *m = file->private_data;
3293                 iter = m->private;
3294         } else
3295                 iter = file->private_data;
3296
3297         if (unlikely(ftrace_disabled))
3298                 return -ENODEV;
3299
3300         /* iter->hash is a local copy, so we don't need regex_lock */
3301
3302         parser = &iter->parser;
3303         read = trace_get_user(parser, ubuf, cnt, ppos);
3304
3305         if (read >= 0 && trace_parser_loaded(parser) &&
3306             !trace_parser_cont(parser)) {
3307                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3308                                            parser->idx, enable);
3309                 trace_parser_clear(parser);
3310                 if (ret < 0)
3311                         goto out;
3312         }
3313
3314         ret = read;
3315  out:
3316         return ret;
3317 }
3318
3319 ssize_t
3320 ftrace_filter_write(struct file *file, const char __user *ubuf,
3321                     size_t cnt, loff_t *ppos)
3322 {
3323         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3324 }
3325
3326 ssize_t
3327 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3328                      size_t cnt, loff_t *ppos)
3329 {
3330         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3331 }
3332
3333 static int
3334 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3335 {
3336         struct ftrace_func_entry *entry;
3337
3338         if (!ftrace_location(ip))
3339                 return -EINVAL;
3340
3341         if (remove) {
3342                 entry = ftrace_lookup_ip(hash, ip);
3343                 if (!entry)
3344                         return -ENOENT;
3345                 free_hash_entry(hash, entry);
3346                 return 0;
3347         }
3348
3349         return add_hash_entry(hash, ip);
3350 }
3351
3352 static int
3353 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3354                 unsigned long ip, int remove, int reset, int enable)
3355 {
3356         struct ftrace_hash **orig_hash;
3357         struct ftrace_hash *hash;
3358         int ret;
3359
3360         /* All global ops use the global ops filters */
3361         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3362                 ops = &global_ops;
3363
3364         if (unlikely(ftrace_disabled))
3365                 return -ENODEV;
3366
3367         mutex_lock(&ops->regex_lock);
3368
3369         if (enable)
3370                 orig_hash = &ops->filter_hash;
3371         else
3372                 orig_hash = &ops->notrace_hash;
3373
3374         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3375         if (!hash) {
3376                 ret = -ENOMEM;
3377                 goto out_regex_unlock;
3378         }
3379
3380         if (reset)
3381                 ftrace_filter_reset(hash);
3382         if (buf && !ftrace_match_records(hash, buf, len)) {
3383                 ret = -EINVAL;
3384                 goto out_regex_unlock;
3385         }
3386         if (ip) {
3387                 ret = ftrace_match_addr(hash, ip, remove);
3388                 if (ret < 0)
3389                         goto out_regex_unlock;
3390         }
3391
3392         mutex_lock(&ftrace_lock);
3393         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3394         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3395             && ftrace_enabled)
3396                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3397
3398         mutex_unlock(&ftrace_lock);
3399
3400  out_regex_unlock:
3401         mutex_unlock(&ops->regex_lock);
3402
3403         free_ftrace_hash(hash);
3404         return ret;
3405 }
3406
3407 static int
3408 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3409                 int reset, int enable)
3410 {
3411         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3412 }
3413
3414 /**
3415  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3416  * @ops - the ops to set the filter with
3417  * @ip - the address to add to or remove from the filter.
3418  * @remove - non zero to remove the ip from the filter
3419  * @reset - non zero to reset all filters before applying this filter.
3420  *
3421  * Filters denote which functions should be enabled when tracing is enabled.
3422  * If @ip is NULL, it fails to update the filter.
3423  */
3424 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3425                          int remove, int reset)
3426 {
3427         ftrace_ops_init(ops);
3428         return ftrace_set_addr(ops, ip, remove, reset, 1);
3429 }
3430 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
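
/*
 * Example (an illustrative sketch, not part of this file; my_ops and
 * my_callback are hypothetical names): filter a single function by
 * address before registering the ops:
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	unsigned long ip = kallsyms_lookup_name("do_fork");
 *
 *	if (ip)
 *		ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 *	register_ftrace_function(&my_ops);
 */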
3431
3432 static int
3433 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3434                  int reset, int enable)
3435 {
3436         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3437 }
3438
3439 /**
3440  * ftrace_set_filter - set a function to filter on in ftrace
3441  * @ops - the ops to set the filter with
3442  * @buf - the string that holds the function filter text.
3443  * @len - the length of the string.
3444  * @reset - non zero to reset all filters before applying this filter.
3445  *
3446  * Filters denote which functions should be enabled when tracing is enabled.
3447  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3448  */
3449 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3450                        int len, int reset)
3451 {
3452         ftrace_ops_init(ops);
3453         return ftrace_set_regex(ops, buf, len, reset, 1);
3454 }
3455 EXPORT_SYMBOL_GPL(ftrace_set_filter);
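
/*
 * Example (an illustrative sketch; my_ops is hypothetical): restrict a
 * callback to the scheduler with a glob before registering it:
 *
 *	ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 *	register_ftrace_function(&my_ops);
 */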
3456
3457 /**
3458  * ftrace_set_notrace - set a function to not trace in ftrace
3459  * @ops - the ops to set the notrace filter with
3460  * @buf - the string that holds the function notrace text.
3461  * @len - the length of the string.
3462  * @reset - non zero to reset all filters before applying this filter.
3463  *
3464  * Notrace Filters denote which functions should not be enabled when tracing
3465  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3466  * for tracing.
3467  */
3468 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3469                         int len, int reset)
3470 {
3471         ftrace_ops_init(ops);
3472         return ftrace_set_regex(ops, buf, len, reset, 0);
3473 }
3474 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
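
/*
 * Example (an illustrative sketch; my_ops is hypothetical): trace
 * everything except the lock helpers:
 *
 *	ftrace_set_notrace(&my_ops, "*_lock", strlen("*_lock"), 1);
 */
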
3475 /**
3476  * ftrace_set_global_filter - set a function to filter on with global tracers
3478  * @buf - the string that holds the function filter text.
3479  * @len - the length of the string.
3480  * @reset - non zero to reset all filters before applying this filter.
3481  *
3482  * Filters denote which functions should be enabled when tracing is enabled.
3483  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3484  */
3485 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3486 {
3487         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3488 }
3489 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3490
3491 /**
3492  * ftrace_set_global_notrace - set a function to not trace with global tracers
3494  * @buf - the string that holds the function notrace text.
3495  * @len - the length of the string.
3496  * @reset - non zero to reset all filters before applying this filter.
3497  *
3498  * Notrace Filters denote which functions should not be enabled when tracing
3499  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3500  * for tracing.
3501  */
3502 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3503 {
3504         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3505 }
3506 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3507
3508 /*
3509  * command line interface to allow users to set filters on boot up.
3510  */
3511 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3512 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3513 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3514
3515 static int __init set_ftrace_notrace(char *str)
3516 {
3517         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3518         return 1;
3519 }
3520 __setup("ftrace_notrace=", set_ftrace_notrace);
3521
3522 static int __init set_ftrace_filter(char *str)
3523 {
3524         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3525         return 1;
3526 }
3527 __setup("ftrace_filter=", set_ftrace_filter);
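
/*
 * Example (illustrative): the equivalent boot-time usage, with entries
 * separated by commas as parsed in ftrace_set_early_filter() below
 * (ftrace_graph_filter= is also available when
 * CONFIG_FUNCTION_GRAPH_TRACER is set):
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=rcu_*
 */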
3528
3529 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3530 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3531 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3532
3533 static int __init set_graph_function(char *str)
3534 {
3535         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3536         return 1;
3537 }
3538 __setup("ftrace_graph_filter=", set_graph_function);
3539
3540 static void __init set_ftrace_early_graph(char *buf)
3541 {
3542         int ret;
3543         char *func;
3544
3545         while (buf) {
3546                 func = strsep(&buf, ",");
3547                 /* we allow only one expression at a time */
3548                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3549                                       func);
3550                 if (ret)
3551                         printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
3553         }
3554 }
3555 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3556
3557 void __init
3558 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3559 {
3560         char *func;
3561
3562         ftrace_ops_init(ops);
3563
3564         while (buf) {
3565                 func = strsep(&buf, ",");
3566                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3567         }
3568 }
3569
3570 static void __init set_ftrace_early_filters(void)
3571 {
3572         if (ftrace_filter_buf[0])
3573                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3574         if (ftrace_notrace_buf[0])
3575                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3576 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3577         if (ftrace_graph_buf[0])
3578                 set_ftrace_early_graph(ftrace_graph_buf);
3579 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3580 }
3581
3582 int ftrace_regex_release(struct inode *inode, struct file *file)
3583 {
3584         struct seq_file *m = (struct seq_file *)file->private_data;
3585         struct ftrace_iterator *iter;
3586         struct ftrace_hash **orig_hash;
3587         struct trace_parser *parser;
3588         int filter_hash;
3589         int ret;
3590
3591         if (file->f_mode & FMODE_READ) {
3592                 iter = m->private;
3593                 seq_release(inode, file);
3594         } else
3595                 iter = file->private_data;
3596
3597         parser = &iter->parser;
3598         if (trace_parser_loaded(parser)) {
3599                 parser->buffer[parser->idx] = 0;
3600                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3601         }
3602
3603         trace_parser_put(parser);
3604
3605         mutex_lock(&iter->ops->regex_lock);
3606
3607         if (file->f_mode & FMODE_WRITE) {
3608                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3609
3610                 if (filter_hash)
3611                         orig_hash = &iter->ops->filter_hash;
3612                 else
3613                         orig_hash = &iter->ops->notrace_hash;
3614
3615                 mutex_lock(&ftrace_lock);
3616                 ret = ftrace_hash_move(iter->ops, filter_hash,
3617                                        orig_hash, iter->hash);
3618                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3619                     && ftrace_enabled)
3620                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3621
3622                 mutex_unlock(&ftrace_lock);
3623         }
3624
3625         mutex_unlock(&iter->ops->regex_lock);
3626         free_ftrace_hash(iter->hash);
3627         kfree(iter);
3628
3629         return 0;
3630 }
3631
3632 static const struct file_operations ftrace_avail_fops = {
3633         .open = ftrace_avail_open,
3634         .read = seq_read,
3635         .llseek = seq_lseek,
3636         .release = seq_release_private,
3637 };
3638
3639 static const struct file_operations ftrace_enabled_fops = {
3640         .open = ftrace_enabled_open,
3641         .read = seq_read,
3642         .llseek = seq_lseek,
3643         .release = seq_release_private,
3644 };
3645
3646 static const struct file_operations ftrace_filter_fops = {
3647         .open = ftrace_filter_open,
3648         .read = seq_read,
3649         .write = ftrace_filter_write,
3650         .llseek = ftrace_filter_lseek,
3651         .release = ftrace_regex_release,
3652 };
3653
3654 static const struct file_operations ftrace_notrace_fops = {
3655         .open = ftrace_notrace_open,
3656         .read = seq_read,
3657         .write = ftrace_notrace_write,
3658         .llseek = ftrace_filter_lseek,
3659         .release = ftrace_regex_release,
3660 };
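
/*
 * Example (illustrative): from user space these files live in the
 * tracing directory; '>' opens with O_TRUNC and resets the hash,
 * '>>' appends, and a leading '!' removes a matching entry:
 *
 *	echo 'sys_nanosleep hrtimer_interrupt' > set_ftrace_filter
 *	echo 'write*:mod:ext3' >> set_ftrace_filter
 *	echo '!hrtimer_interrupt' >> set_ftrace_filter
 */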
3661
3662 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3663
3664 static DEFINE_MUTEX(graph_lock);
3665
3666 int ftrace_graph_count;
3667 int ftrace_graph_filter_enabled;
3668 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3669
3670 static void *
3671 __g_next(struct seq_file *m, loff_t *pos)
3672 {
3673         if (*pos >= ftrace_graph_count)
3674                 return NULL;
3675         return &ftrace_graph_funcs[*pos];
3676 }
3677
3678 static void *
3679 g_next(struct seq_file *m, void *v, loff_t *pos)
3680 {
3681         (*pos)++;
3682         return __g_next(m, pos);
3683 }
3684
3685 static void *g_start(struct seq_file *m, loff_t *pos)
3686 {
3687         mutex_lock(&graph_lock);
3688
3689         /* Nothing is filtered; tell g_show to print that all functions are enabled */
3690         if (!ftrace_graph_filter_enabled && !*pos)
3691                 return (void *)1;
3692
3693         return __g_next(m, pos);
3694 }
3695
3696 static void g_stop(struct seq_file *m, void *p)
3697 {
3698         mutex_unlock(&graph_lock);
3699 }
3700
3701 static int g_show(struct seq_file *m, void *v)
3702 {
3703         unsigned long *ptr = v;
3704
3705         if (!ptr)
3706                 return 0;
3707
3708         if (ptr == (unsigned long *)1) {
3709                 seq_printf(m, "#### all functions enabled ####\n");
3710                 return 0;
3711         }
3712
3713         seq_printf(m, "%ps\n", (void *)*ptr);
3714
3715         return 0;
3716 }
3717
3718 static const struct seq_operations ftrace_graph_seq_ops = {
3719         .start = g_start,
3720         .next = g_next,
3721         .stop = g_stop,
3722         .show = g_show,
3723 };
3724
3725 static int
3726 ftrace_graph_open(struct inode *inode, struct file *file)
3727 {
3728         int ret = 0;
3729
3730         if (unlikely(ftrace_disabled))
3731                 return -ENODEV;
3732
3733         mutex_lock(&graph_lock);
3734         if ((file->f_mode & FMODE_WRITE) &&
3735             (file->f_flags & O_TRUNC)) {
3736                 ftrace_graph_filter_enabled = 0;
3737                 ftrace_graph_count = 0;
3738                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3739         }
3740         mutex_unlock(&graph_lock);
3741
3742         if (file->f_mode & FMODE_READ)
3743                 ret = seq_open(file, &ftrace_graph_seq_ops);
3744
3745         return ret;
3746 }
3747
3748 static int
3749 ftrace_graph_release(struct inode *inode, struct file *file)
3750 {
3751         if (file->f_mode & FMODE_READ)
3752                 seq_release(inode, file);
3753         return 0;
3754 }
3755
3756 static int
3757 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3758 {
3759         struct dyn_ftrace *rec;
3760         struct ftrace_page *pg;
3761         int search_len;
3762         int fail = 1;
3763         int type, not;
3764         char *search;
3765         bool exists;
3766         int i;
3767
3768         /* decode regex */
3769         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3770         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3771                 return -EBUSY;
3772
3773         search_len = strlen(search);
3774
3775         mutex_lock(&ftrace_lock);
3776
3777         if (unlikely(ftrace_disabled)) {
3778                 mutex_unlock(&ftrace_lock);
3779                 return -ENODEV;
3780         }
3781
3782         do_for_each_ftrace_rec(pg, rec) {
3783
3784                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3785                         /* if it is in the array */
3786                         exists = false;
3787                         for (i = 0; i < *idx; i++) {
3788                                 if (array[i] == rec->ip) {
3789                                         exists = true;
3790                                         break;
3791                                 }
3792                         }
3793
3794                         if (!not) {
3795                                 fail = 0;
3796                                 if (!exists) {
3797                                         array[(*idx)++] = rec->ip;
3798                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3799                                                 goto out;
3800                                 }
3801                         } else {
3802                                 if (exists) {
3803                                         array[i] = array[--(*idx)];
3804                                         array[*idx] = 0;
3805                                         fail = 0;
3806                                 }
3807                         }
3808                 }
3809         } while_for_each_ftrace_rec();
3810 out:
3811         mutex_unlock(&ftrace_lock);
3812
3813         if (fail)
3814                 return -EINVAL;
3815
3816         ftrace_graph_filter_enabled = !!(*idx);
3817
3818         return 0;
3819 }
3820
3821 static ssize_t
3822 ftrace_graph_write(struct file *file, const char __user *ubuf,
3823                    size_t cnt, loff_t *ppos)
3824 {
3825         struct trace_parser parser;
3826         ssize_t read, ret;
3827
3828         if (!cnt)
3829                 return 0;
3830
3831         mutex_lock(&graph_lock);
3832
3833         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3834                 ret = -ENOMEM;
3835                 goto out_unlock;
3836         }
3837
3838         read = trace_get_user(&parser, ubuf, cnt, ppos);
3839
3840         if (read >= 0 && trace_parser_loaded(&parser)) {
3841                 parser.buffer[parser.idx] = 0;
3842
3843                 /* we allow only one expression at a time */
3844                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3845                                         parser.buffer);
3846                 if (ret)
3847                         goto out_free;
3848         }
3849
3850         ret = read;
3851
3852 out_free:
3853         trace_parser_put(&parser);
3854 out_unlock:
3855         mutex_unlock(&graph_lock);
3856
3857         return ret;
3858 }
3859
3860 static const struct file_operations ftrace_graph_fops = {
3861         .open           = ftrace_graph_open,
3862         .read           = seq_read,
3863         .write          = ftrace_graph_write,
3864         .llseek         = ftrace_filter_lseek,
3865         .release        = ftrace_graph_release,
3866 };
3867 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3868
3869 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3870 {
3871
3872         trace_create_file("available_filter_functions", 0444,
3873                         d_tracer, NULL, &ftrace_avail_fops);
3874
3875         trace_create_file("enabled_functions", 0444,
3876                         d_tracer, NULL, &ftrace_enabled_fops);
3877
3878         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3879                         NULL, &ftrace_filter_fops);
3880
3881         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3882                                     NULL, &ftrace_notrace_fops);
3883
3884 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3885         trace_create_file("set_graph_function", 0644, d_tracer,
3886                                     NULL, &ftrace_graph_fops);
3888 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3889
3890         return 0;
3891 }
3892
3893 static int ftrace_cmp_ips(const void *a, const void *b)
3894 {
3895         const unsigned long *ipa = a;
3896         const unsigned long *ipb = b;
3897
3898         if (*ipa > *ipb)
3899                 return 1;
3900         if (*ipa < *ipb)
3901                 return -1;
3902         return 0;
3903 }
3904
3905 static void ftrace_swap_ips(void *a, void *b, int size)
3906 {
3907         unsigned long *ipa = a;
3908         unsigned long *ipb = b;
3909         unsigned long t;
3910
3911         t = *ipa;
3912         *ipa = *ipb;
3913         *ipb = t;
3914 }
3915
3916 static int ftrace_process_locs(struct module *mod,
3917                                unsigned long *start,
3918                                unsigned long *end)
3919 {
3920         struct ftrace_page *start_pg;
3921         struct ftrace_page *pg;
3922         struct dyn_ftrace *rec;
3923         unsigned long count;
3924         unsigned long *p;
3925         unsigned long addr;
3926         unsigned long flags = 0; /* Shut up gcc */
3927         int ret = -ENOMEM;
3928
3929         count = end - start;
3930
3931         if (!count)
3932                 return 0;
3933
3934         sort(start, count, sizeof(*start),
3935              ftrace_cmp_ips, ftrace_swap_ips);
3936
3937         start_pg = ftrace_allocate_pages(count);
3938         if (!start_pg)
3939                 return -ENOMEM;
3940
3941         mutex_lock(&ftrace_lock);
3942
3943         /*
3944          * The core kernel and each module need their own pages, as
3945          * modules will free them when they are removed.
3946          * Force a new page to be allocated for modules.
3947          */
3948         if (!mod) {
3949                 WARN_ON(ftrace_pages || ftrace_pages_start);
3950                 /* First initialization */
3951                 ftrace_pages = ftrace_pages_start = start_pg;
3952         } else {
3953                 if (!ftrace_pages)
3954                         goto out;
3955
3956                 if (WARN_ON(ftrace_pages->next)) {
3957                         /* Hmm, we have free pages? */
3958                         while (ftrace_pages->next)
3959                                 ftrace_pages = ftrace_pages->next;
3960                 }
3961
3962                 ftrace_pages->next = start_pg;
3963         }
3964
3965         p = start;
3966         pg = start_pg;
3967         while (p < end) {
3968                 addr = ftrace_call_adjust(*p++);
3969                 /*
3970                  * Some architecture linkers will pad between
3971                  * the different mcount_loc sections of different
3972                  * object files to satisfy alignments.
3973                  * Skip any NULL pointers.
3974                  */
3975                 if (!addr)
3976                         continue;
3977
3978                 if (pg->index == pg->size) {
3979                         /* We should have allocated enough */
3980                         if (WARN_ON(!pg->next))
3981                                 break;
3982                         pg = pg->next;
3983                 }
3984
3985                 rec = &pg->records[pg->index++];
3986                 rec->ip = addr;
3987         }
3988
3989         /* We should have used all pages */
3990         WARN_ON(pg->next);
3991
3992         /* Assign the last page to ftrace_pages */
3993         ftrace_pages = pg;
3994
3995         /* These new locations need to be initialized */
3996         ftrace_new_pgs = start_pg;
3997
3998         /*
3999          * We only need to disable interrupts on start up
4000          * because we are modifying code that an interrupt
4001          * may execute, and the modification is not atomic.
4002          * But for modules, nothing runs the code we modify
4003          * until we are finished with it, and there's no
4004          * reason to cause large interrupt latencies while we do it.
4005          */
4006         if (!mod)
4007                 local_irq_save(flags);
4008         ftrace_update_code(mod);
4009         if (!mod)
4010                 local_irq_restore(flags);
4011         ret = 0;
4012  out:
4013         mutex_unlock(&ftrace_lock);
4014
4015         return ret;
4016 }
4017
4018 #ifdef CONFIG_MODULES
4019
4020 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4021
4022 void ftrace_release_mod(struct module *mod)
4023 {
4024         struct dyn_ftrace *rec;
4025         struct ftrace_page **last_pg;
4026         struct ftrace_page *pg;
4027         int order;
4028
4029         mutex_lock(&ftrace_lock);
4030
4031         if (ftrace_disabled)
4032                 goto out_unlock;
4033
4034         /*
4035          * Each module has its own ftrace_pages; remove
4036          * them from the list.
4037          */
4038         last_pg = &ftrace_pages_start;
4039         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4040                 rec = &pg->records[0];
4041                 if (within_module_core(rec->ip, mod)) {
4042                         /*
4043                          * As core pages are first, the first
4044                          * page should never be a module page.
4045                          */
4046                         if (WARN_ON(pg == ftrace_pages_start))
4047                                 goto out_unlock;
4048
4049                         /* Check if we are deleting the last page */
4050                         if (pg == ftrace_pages)
4051                                 ftrace_pages = next_to_ftrace_page(last_pg);
4052
4053                         *last_pg = pg->next;
4054                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4055                         free_pages((unsigned long)pg->records, order);
4056                         kfree(pg);
4057                 } else
4058                         last_pg = &pg->next;
4059         }
4060  out_unlock:
4061         mutex_unlock(&ftrace_lock);
4062 }
4063
4064 static void ftrace_init_module(struct module *mod,
4065                                unsigned long *start, unsigned long *end)
4066 {
4067         if (ftrace_disabled || start == end)
4068                 return;
4069         ftrace_process_locs(mod, start, end);
4070 }
4071
4072 static int ftrace_module_notify_enter(struct notifier_block *self,
4073                                       unsigned long val, void *data)
4074 {
4075         struct module *mod = data;
4076
4077         if (val == MODULE_STATE_COMING)
4078                 ftrace_init_module(mod, mod->ftrace_callsites,
4079                                    mod->ftrace_callsites +
4080                                    mod->num_ftrace_callsites);
4081         return 0;
4082 }
4083
4084 static int ftrace_module_notify_exit(struct notifier_block *self,
4085                                      unsigned long val, void *data)
4086 {
4087         struct module *mod = data;
4088
4089         if (val == MODULE_STATE_GOING)
4090                 ftrace_release_mod(mod);
4091
4092         return 0;
4093 }
4094 #else
4095 static int ftrace_module_notify_enter(struct notifier_block *self,
4096                                       unsigned long val, void *data)
4097 {
4098         return 0;
4099 }
4100 static int ftrace_module_notify_exit(struct notifier_block *self,
4101                                      unsigned long val, void *data)
4102 {
4103         return 0;
4104 }
4105 #endif /* CONFIG_MODULES */
4106
4107 struct notifier_block ftrace_module_enter_nb = {
4108         .notifier_call = ftrace_module_notify_enter,
4109         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4110 };
4111
4112 struct notifier_block ftrace_module_exit_nb = {
4113         .notifier_call = ftrace_module_notify_exit,
4114         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4115 };
4116
4117 extern unsigned long __start_mcount_loc[];
4118 extern unsigned long __stop_mcount_loc[];
4119
4120 void __init ftrace_init(void)
4121 {
4122         unsigned long count, addr, flags;
4123         int ret;
4124
4125         /* Keep the ftrace pointer to the stub */
4126         addr = (unsigned long)ftrace_stub;
4127
4128         local_irq_save(flags);
4129         ftrace_dyn_arch_init(&addr);
4130         local_irq_restore(flags);
4131
4132         /* ftrace_dyn_arch_init places the return code in addr */
4133         if (addr)
4134                 goto failed;
4135
4136         count = __stop_mcount_loc - __start_mcount_loc;
4137
4138         ret = ftrace_dyn_table_alloc(count);
4139         if (ret)
4140                 goto failed;
4141
4142         last_ftrace_enabled = ftrace_enabled = 1;
4143
4144         ret = ftrace_process_locs(NULL,
4145                                   __start_mcount_loc,
4146                                   __stop_mcount_loc);
4147
4148         ret = register_module_notifier(&ftrace_module_enter_nb);
4149         if (ret)
4150                 pr_warning("Failed to register ftrace module enter notifier\n");
4151
4152         ret = register_module_notifier(&ftrace_module_exit_nb);
4153         if (ret)
4154                 pr_warning("Failed to register ftrace module exit notifier\n");
4155
4156         set_ftrace_early_filters();
4157
4158         return;
4159  failed:
4160         ftrace_disabled = 1;
4161 }
4162
4163 #else
4164
4165 static struct ftrace_ops global_ops = {
4166         .func                   = ftrace_stub,
4167         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4168         INIT_REGEX_LOCK(global_ops)
4169 };
4170
4171 static int __init ftrace_nodyn_init(void)
4172 {
4173         ftrace_enabled = 1;
4174         return 0;
4175 }
4176 core_initcall(ftrace_nodyn_init);
4177
4178 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4179 static inline void ftrace_startup_enable(int command) { }
4180 /* Keep as macros so we do not need to define the commands */
4181 # define ftrace_startup(ops, command)                   \
4182         ({                                              \
4183                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4184                 0;                                      \
4185         })
4186 # define ftrace_shutdown(ops, command)  do { } while (0)
4187 # define ftrace_startup_sysctl()        do { } while (0)
4188 # define ftrace_shutdown_sysctl()       do { } while (0)
4189
4190 static inline int
4191 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4192 {
4193         return 1;
4194 }
4195
4196 #endif /* CONFIG_DYNAMIC_FTRACE */
4197
4198 static void
4199 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4200                         struct ftrace_ops *op, struct pt_regs *regs)
4201 {
4202         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4203                 return;
4204
4205         /*
4206          * Some of the ops may be dynamically allocated,
4207          * they must be freed after a synchronize_sched().
4208          */
4209         preempt_disable_notrace();
4210         trace_recursion_set(TRACE_CONTROL_BIT);
4211         do_for_each_ftrace_op(op, ftrace_control_list) {
4212                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4213                     !ftrace_function_local_disabled(op) &&
4214                     ftrace_ops_test(op, ip))
4215                         op->func(ip, parent_ip, op, regs);
4216         } while_for_each_ftrace_op(op);
4217         trace_recursion_clear(TRACE_CONTROL_BIT);
4218         preempt_enable_notrace();
4219 }
4220
4221 static struct ftrace_ops control_ops = {
4222         .func   = ftrace_ops_control_func,
4223         .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4224         INIT_REGEX_LOCK(control_ops)
4225 };
4226
4227 static inline void
4228 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4229                        struct ftrace_ops *ignored, struct pt_regs *regs)
4230 {
4231         struct ftrace_ops *op;
4232         int bit;
4233
4234         if (function_trace_stop)
4235                 return;
4236
4237         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4238         if (bit < 0)
4239                 return;
4240
4241         /*
4242          * Some of the ops may be dynamically allocated,
4243          * they must be freed after a synchronize_sched().
4244          */
4245         preempt_disable_notrace();
4246         do_for_each_ftrace_op(op, ftrace_ops_list) {
4247                 if (ftrace_ops_test(op, ip))
4248                         op->func(ip, parent_ip, op, regs);
4249         } while_for_each_ftrace_op(op);
4250         preempt_enable_notrace();
4251         trace_clear_recursion(bit);
4252 }
4253
4254 /*
4255  * Some archs only support passing ip and parent_ip. Even though
4256  * the list function ignores the op parameter, we do not want any
4257  * C side effects, where a function is called without the caller
4258  * sending a third parameter.
4259  * Archs must support both regs and ftrace_ops at the same time:
4260  * if they support ftrace_ops, it is assumed they support regs.
4261  * If callbacks want to use regs, they must either check for regs
4262  * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4263  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4264  * An architecture can pass partial regs with ftrace_ops and still
4265  * set ARCH_SUPPORTS_FTRACE_OPS.
4266  */
4267 #if ARCH_SUPPORTS_FTRACE_OPS
4268 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4269                                  struct ftrace_ops *op, struct pt_regs *regs)
4270 {
4271         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4272 }
4273 #else
4274 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4275 {
4276         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4277 }
4278 #endif
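
/*
 * Example (an illustrative sketch; my_callback is hypothetical): per the
 * note above, a callback that wants pt_regs must tolerate a NULL regs
 * pointer unless it requires CONFIG_DYNAMIC_FTRACE_WITH_REGS:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;	(partial or no regs were passed)
 *		(inspect regs here)
 *	}
 */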
4279
4280 static void clear_ftrace_swapper(void)
4281 {
4282         struct task_struct *p;
4283         int cpu;
4284
4285         get_online_cpus();
4286         for_each_online_cpu(cpu) {
4287                 p = idle_task(cpu);
4288                 clear_tsk_trace_trace(p);
4289         }
4290         put_online_cpus();
4291 }
4292
4293 static void set_ftrace_swapper(void)
4294 {
4295         struct task_struct *p;
4296         int cpu;
4297
4298         get_online_cpus();
4299         for_each_online_cpu(cpu) {
4300                 p = idle_task(cpu);
4301                 set_tsk_trace_trace(p);
4302         }
4303         put_online_cpus();
4304 }
4305
4306 static void clear_ftrace_pid(struct pid *pid)
4307 {
4308         struct task_struct *p;
4309
4310         rcu_read_lock();
4311         do_each_pid_task(pid, PIDTYPE_PID, p) {
4312                 clear_tsk_trace_trace(p);
4313         } while_each_pid_task(pid, PIDTYPE_PID, p);
4314         rcu_read_unlock();
4315
4316         put_pid(pid);
4317 }
4318
4319 static void set_ftrace_pid(struct pid *pid)
4320 {
4321         struct task_struct *p;
4322
4323         rcu_read_lock();
4324         do_each_pid_task(pid, PIDTYPE_PID, p) {
4325                 set_tsk_trace_trace(p);
4326         } while_each_pid_task(pid, PIDTYPE_PID, p);
4327         rcu_read_unlock();
4328 }
4329
4330 static void clear_ftrace_pid_task(struct pid *pid)
4331 {
4332         if (pid == ftrace_swapper_pid)
4333                 clear_ftrace_swapper();
4334         else
4335                 clear_ftrace_pid(pid);
4336 }
4337
4338 static void set_ftrace_pid_task(struct pid *pid)
4339 {
4340         if (pid == ftrace_swapper_pid)
4341                 set_ftrace_swapper();
4342         else
4343                 set_ftrace_pid(pid);
4344 }
4345
4346 static int ftrace_pid_add(int p)
4347 {
4348         struct pid *pid;
4349         struct ftrace_pid *fpid;
4350         int ret = -EINVAL;
4351
4352         mutex_lock(&ftrace_lock);
4353
4354         if (!p)
4355                 pid = ftrace_swapper_pid;
4356         else
4357                 pid = find_get_pid(p);
4358
4359         if (!pid)
4360                 goto out;
4361
4362         ret = 0;
4363
4364         list_for_each_entry(fpid, &ftrace_pids, list)
4365                 if (fpid->pid == pid)
4366                         goto out_put;
4367
4368         ret = -ENOMEM;
4369
4370         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4371         if (!fpid)
4372                 goto out_put;
4373
4374         list_add(&fpid->list, &ftrace_pids);
4375         fpid->pid = pid;
4376
4377         set_ftrace_pid_task(pid);
4378
4379         ftrace_update_pid_func();
4380         ftrace_startup_enable(0);
4381
4382         mutex_unlock(&ftrace_lock);
4383         return 0;
4384
4385 out_put:
4386         if (pid != ftrace_swapper_pid)
4387                 put_pid(pid);
4388
4389 out:
4390         mutex_unlock(&ftrace_lock);
4391         return ret;
4392 }
4393
4394 static void ftrace_pid_reset(void)
4395 {
4396         struct ftrace_pid *fpid, *safe;
4397
4398         mutex_lock(&ftrace_lock);
4399         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4400                 struct pid *pid = fpid->pid;
4401
4402                 clear_ftrace_pid_task(pid);
4403
4404                 list_del(&fpid->list);
4405                 kfree(fpid);
4406         }
4407
4408         ftrace_update_pid_func();
4409         ftrace_startup_enable(0);
4410
4411         mutex_unlock(&ftrace_lock);
4412 }
4413
4414 static void *fpid_start(struct seq_file *m, loff_t *pos)
4415 {
4416         mutex_lock(&ftrace_lock);
4417
4418         if (list_empty(&ftrace_pids) && (!*pos))
4419                 return (void *) 1;
4420
4421         return seq_list_start(&ftrace_pids, *pos);
4422 }
4423
4424 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4425 {
4426         if (v == (void *)1)
4427                 return NULL;
4428
4429         return seq_list_next(v, &ftrace_pids, pos);
4430 }
4431
4432 static void fpid_stop(struct seq_file *m, void *p)
4433 {
4434         mutex_unlock(&ftrace_lock);
4435 }
4436
4437 static int fpid_show(struct seq_file *m, void *v)
4438 {
4439         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4440
4441         if (v == (void *)1) {
4442                 seq_printf(m, "no pid\n");
4443                 return 0;
4444         }
4445
4446         if (fpid->pid == ftrace_swapper_pid)
4447                 seq_printf(m, "swapper tasks\n");
4448         else
4449                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4450
4451         return 0;
4452 }
4453
4454 static const struct seq_operations ftrace_pid_sops = {
4455         .start = fpid_start,
4456         .next = fpid_next,
4457         .stop = fpid_stop,
4458         .show = fpid_show,
4459 };
4460
4461 static int
4462 ftrace_pid_open(struct inode *inode, struct file *file)
4463 {
4464         int ret = 0;
4465
4466         if ((file->f_mode & FMODE_WRITE) &&
4467             (file->f_flags & O_TRUNC))
4468                 ftrace_pid_reset();
4469
4470         if (file->f_mode & FMODE_READ)
4471                 ret = seq_open(file, &ftrace_pid_sops);
4472
4473         return ret;
4474 }
4475
4476 static ssize_t
4477 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4478                    size_t cnt, loff_t *ppos)
4479 {
4480         char buf[64], *tmp;
4481         long val;
4482         int ret;
4483
4484         if (cnt >= sizeof(buf))
4485                 return -EINVAL;
4486
4487         if (copy_from_user(&buf, ubuf, cnt))
4488                 return -EFAULT;
4489
4490         buf[cnt] = 0;
4491
4492         /*
4493          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4494          * to clean the filter quietly.
4495          */
4496         tmp = strstrip(buf);
4497         if (strlen(tmp) == 0)
4498                 return 1;
4499
4500         ret = kstrtol(tmp, 10, &val);
4501         if (ret < 0)
4502                 return ret;
4503
4504         ret = ftrace_pid_add(val);
4505
4506         return ret ? ret : cnt;
4507 }
4508
4509 static int
4510 ftrace_pid_release(struct inode *inode, struct file *file)
4511 {
4512         if (file->f_mode & FMODE_READ)
4513                 seq_release(inode, file);
4514
4515         return 0;
4516 }
4517
4518 static const struct file_operations ftrace_pid_fops = {
4519         .open           = ftrace_pid_open,
4520         .write          = ftrace_pid_write,
4521         .read           = seq_read,
4522         .llseek         = ftrace_filter_lseek,
4523         .release        = ftrace_pid_release,
4524 };
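
/*
 * Example (illustrative): limit function tracing to a single task, then
 * clear the pid filter again with an empty write:
 *
 *	echo 1234 > set_ftrace_pid
 *	echo > set_ftrace_pid
 */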
4525
4526 static __init int ftrace_init_debugfs(void)
4527 {
4528         struct dentry *d_tracer;
4529
4530         d_tracer = tracing_init_dentry();
4531         if (!d_tracer)
4532                 return 0;
4533
4534         ftrace_init_dyn_debugfs(d_tracer);
4535
4536         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4537                             NULL, &ftrace_pid_fops);
4538
4539         ftrace_profile_debugfs(d_tracer);
4540
4541         return 0;
4542 }
4543 fs_initcall(ftrace_init_debugfs);
4544
4545 /**
4546  * ftrace_kill - kill ftrace
4547  *
4548  * This function should be used by panic code. It stops ftrace
4549  * but in a not so nice way: no locks are taken and nothing is
4550  * freed, so it is safe to call even from atomic context.
4551  */
4552 void ftrace_kill(void)
4553 {
4554         ftrace_disabled = 1;
4555         ftrace_enabled = 0;
4556         clear_ftrace_function();
4557 }
4558
4559 /**
4560  * ftrace_is_dead - Test if ftrace is dead or not.
4561  */
4562 int ftrace_is_dead(void)
4563 {
4564         return ftrace_disabled;
4565 }
4566
4567 /**
4568  * register_ftrace_function - register a function for profiling
4569  * @ops - ops structure that holds the function for profiling.
4570  *
4571  * Register a function to be called by all functions in the
4572  * kernel.
4573  *
4574  * Note: @ops->func and all the functions it calls must be labeled
4575  *       with "notrace", otherwise it will go into a
4576  *       recursive loop.
4577  */
4578 int register_ftrace_function(struct ftrace_ops *ops)
4579 {
4580         int ret = -1;
4581
4582         ftrace_ops_init(ops);
4583
4584         mutex_lock(&ftrace_lock);
4585
4586         ret = __register_ftrace_function(ops);
4587         if (!ret)
4588                 ret = ftrace_startup(ops, 0);
4589
4590         mutex_unlock(&ftrace_lock);
4591
4592         return ret;
4593 }
4594 EXPORT_SYMBOL_GPL(register_ftrace_function);
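
/*
 * Example (an illustrative sketch; the names are hypothetical): a
 * minimal registration, keeping the callback itself out of the trace
 * per the recursion note above:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		(must not call anything traceable)
 *	}
 *
 *	static struct ftrace_ops my_trace_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 */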
4595
4596 /**
4597  * unregister_ftrace_function - unregister a function for profiling.
4598  * @ops - ops structure that holds the function to unregister
4599  *
4600  * Unregister a function that was added to be called by ftrace profiling.
4601  */
4602 int unregister_ftrace_function(struct ftrace_ops *ops)
4603 {
4604         int ret;
4605
4606         mutex_lock(&ftrace_lock);
4607         ret = __unregister_ftrace_function(ops);
4608         if (!ret)
4609                 ftrace_shutdown(ops, 0);
4610         mutex_unlock(&ftrace_lock);
4611
4612         return ret;
4613 }
4614 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4615
4616 int
4617 ftrace_enable_sysctl(struct ctl_table *table, int write,
4618                      void __user *buffer, size_t *lenp,
4619                      loff_t *ppos)
4620 {
4621         int ret = -ENODEV;
4622
4623         mutex_lock(&ftrace_lock);
4624
4625         if (unlikely(ftrace_disabled))
4626                 goto out;
4627
4628         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4629
4630         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4631                 goto out;
4632
4633         last_ftrace_enabled = !!ftrace_enabled;
4634
4635         if (ftrace_enabled) {
4636
4637                 ftrace_startup_sysctl();
4638
4639                 /* we are starting ftrace again */
4640                 if (ftrace_ops_list != &ftrace_list_end)
4641                         update_ftrace_function();
4642
4643         } else {
4644                 /* stopping ftrace calls (just send to ftrace_stub) */
4645                 ftrace_trace_function = ftrace_stub;
4646
4647                 ftrace_shutdown_sysctl();
4648         }
4649
4650  out:
4651         mutex_unlock(&ftrace_lock);
4652         return ret;
4653 }
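
/*
 * Example (illustrative): this handler backs the kernel.ftrace_enabled
 * sysctl, so function tracing can be toggled at run time:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	sysctl kernel.ftrace_enabled=1
 */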
4654
4655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4656
4657 static int ftrace_graph_active;
4658 static struct notifier_block ftrace_suspend_notifier;
4659
4660 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4661 {
4662         return 0;
4663 }
4664
4665 /* The callbacks that hook a function */
4666 trace_func_graph_ret_t ftrace_graph_return =
4667                         (trace_func_graph_ret_t)ftrace_stub;
4668 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4669
4670 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4671 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4672 {
4673         int i;
4674         int ret = 0;
4675         unsigned long flags;
4676         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4677         struct task_struct *g, *t;
4678
4679         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4680                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4681                                         * sizeof(struct ftrace_ret_stack),
4682                                         GFP_KERNEL);
4683                 if (!ret_stack_list[i]) {
4684                         start = 0;
4685                         end = i;
4686                         ret = -ENOMEM;
4687                         goto free;
4688                 }
4689         }
4690
4691         read_lock_irqsave(&tasklist_lock, flags);
4692         do_each_thread(g, t) {
4693                 if (start == end) {
4694                         ret = -EAGAIN;
4695                         goto unlock;
4696                 }
4697
4698                 if (t->ret_stack == NULL) {
4699                         atomic_set(&t->tracing_graph_pause, 0);
4700                         atomic_set(&t->trace_overrun, 0);
4701                         t->curr_ret_stack = -1;
4702                         /* Make sure the tasks see the -1 first: */
4703                         smp_wmb();
4704                         t->ret_stack = ret_stack_list[start++];
4705                 }
4706         } while_each_thread(g, t);
4707
4708 unlock:
4709         read_unlock_irqrestore(&tasklist_lock, flags);
4710 free:
4711         for (i = start; i < end; i++)
4712                 kfree(ret_stack_list[i]);
4713         return ret;
4714 }
4715
4716 static void
4717 ftrace_graph_probe_sched_switch(void *ignore,
4718                         struct task_struct *prev, struct task_struct *next)
4719 {
4720         unsigned long long timestamp;
4721         int index;
4722
4723         /*
4724          * Does the user want to count the time a function was asleep?
4725          * If so, do not update the time stamps.
4726          */
4727         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4728                 return;
4729
4730         timestamp = trace_clock_local();
4731
4732         prev->ftrace_timestamp = timestamp;
4733
4734         /* only process tasks that we timestamped */
4735         if (!next->ftrace_timestamp)
4736                 return;
4737
4738         /*
4739          * Update all the counters in next to make up for the
4740          * time next was sleeping.
4741          */
4742         timestamp -= next->ftrace_timestamp;
4743
4744         for (index = next->curr_ret_stack; index >= 0; index--)
4745                 next->ret_stack[index].calltime += timestamp;
4746 }
4747
4748 /* Allocate a return stack for each task */
4749 static int start_graph_tracing(void)
4750 {
4751         struct ftrace_ret_stack **ret_stack_list;
4752         int ret, cpu;
4753
4754         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4755                                 sizeof(struct ftrace_ret_stack *),
4756                                 GFP_KERNEL);
4757
4758         if (!ret_stack_list)
4759                 return -ENOMEM;
4760
4761         /* The cpu_boot init_task->ret_stack will never be freed */
4762         for_each_online_cpu(cpu) {
4763                 if (!idle_task(cpu)->ret_stack)
4764                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4765         }
4766
4767         do {
4768                 ret = alloc_retstack_tasklist(ret_stack_list);
4769         } while (ret == -EAGAIN);
4770
4771         if (!ret) {
4772                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4773                 if (ret)
4774                         pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
4776         }
4777
4778         kfree(ret_stack_list);
4779         return ret;
4780 }
4781
4782 /*
4783  * Hibernation protection.
4784  * The state of the current task is too unstable during
4785  * suspend/restore to disk. We want to protect against that.
4786  */
4787 static int
4788 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4789                                                         void *unused)
4790 {
4791         switch (state) {
4792         case PM_HIBERNATION_PREPARE:
4793                 pause_graph_tracing();
4794                 break;
4795
4796         case PM_POST_HIBERNATION:
4797                 unpause_graph_tracing();
4798                 break;
4799         }
4800         return NOTIFY_DONE;
4801 }
4802
4803 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4804                         trace_func_graph_ent_t entryfunc)
4805 {
4806         int ret = 0;
4807
4808         mutex_lock(&ftrace_lock);
4809
4810         /* we currently allow only one tracer registered at a time */
4811         if (ftrace_graph_active) {
4812                 ret = -EBUSY;
4813                 goto out;
4814         }
4815
4816         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4817         register_pm_notifier(&ftrace_suspend_notifier);
4818
4819         ftrace_graph_active++;
4820         ret = start_graph_tracing();
4821         if (ret) {
4822                 ftrace_graph_active--;
4823                 goto out;
4824         }
4825
4826         ftrace_graph_return = retfunc;
4827         ftrace_graph_entry = entryfunc;
4828
4829         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4830
4831 out:
4832         mutex_unlock(&ftrace_lock);
4833         return ret;
4834 }
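
/*
 * Example (an illustrative sketch; the names are hypothetical): the
 * entry callback decides per call whether to trace (nonzero means yes),
 * and the return handler is passed as the first argument:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 */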
4835
4836 void unregister_ftrace_graph(void)
4837 {
4838         mutex_lock(&ftrace_lock);
4839
4840         if (unlikely(!ftrace_graph_active))
4841                 goto out;
4842
4843         ftrace_graph_active--;
4844         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4845         ftrace_graph_entry = ftrace_graph_entry_stub;
4846         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4847         unregister_pm_notifier(&ftrace_suspend_notifier);
4848         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4849
4850  out:
4851         mutex_unlock(&ftrace_lock);
4852 }
4853
4854 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4855
4856 static void
4857 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4858 {
4859         atomic_set(&t->tracing_graph_pause, 0);
4860         atomic_set(&t->trace_overrun, 0);
4861         t->ftrace_timestamp = 0;
4862         /* make curr_ret_stack visible before we add the ret_stack */
4863         smp_wmb();
4864         t->ret_stack = ret_stack;
4865 }
4866
4867 /*
4868  * Allocate a return stack for the idle task. May be the first
4869  * time through, or it may be done by CPU hotplug online.
4870  */
4871 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4872 {
4873         t->curr_ret_stack = -1;
4874         /*
4875          * The idle task has no parent; it either has its own
4876          * stack or no stack at all.
4877          */
4878         if (t->ret_stack)
4879                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4880
4881         if (ftrace_graph_active) {
4882                 struct ftrace_ret_stack *ret_stack;
4883
4884                 ret_stack = per_cpu(idle_ret_stack, cpu);
4885                 if (!ret_stack) {
4886                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4887                                             * sizeof(struct ftrace_ret_stack),
4888                                             GFP_KERNEL);
4889                         if (!ret_stack)
4890                                 return;
4891                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4892                 }
4893                 graph_init_task(t, ret_stack);
4894         }
4895 }
4896
4897 /* Allocate a return stack for newly created task */
4898 void ftrace_graph_init_task(struct task_struct *t)
4899 {
4900         /* Make sure we do not use the parent ret_stack */
4901         t->ret_stack = NULL;
4902         t->curr_ret_stack = -1;
4903
4904         if (ftrace_graph_active) {
4905                 struct ftrace_ret_stack *ret_stack;
4906
4907                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4908                                 * sizeof(struct ftrace_ret_stack),
4909                                 GFP_KERNEL);
4910                 if (!ret_stack)
4911                         return;
4912                 graph_init_task(t, ret_stack);
4913         }
4914 }
4915
4916 void ftrace_graph_exit_task(struct task_struct *t)
4917 {
4918         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4919
4920         t->ret_stack = NULL;
4921         /* NULL must become visible to IRQs before we free it: */
4922         barrier();
4923
4924         kfree(ret_stack);
4925 }
4926
4927 void ftrace_graph_stop(void)
4928 {
4929         ftrace_stop();
4930 }
4931 #endif