1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond)                    \
44         ({                                      \
45                 int ___r = cond;                \
46                 if (WARN_ON(___r))              \
47                         ftrace_kill();          \
48                 ___r;                           \
49         })
50
51 #define FTRACE_WARN_ON_ONCE(cond)               \
52         ({                                      \
53                 int ___r = cond;                \
54                 if (WARN_ON_ONCE(___r))         \
55                         ftrace_kill();          \
56                 ___r;                           \
57         })
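/*
 * Note: both macros are GNU C statement expressions, so they evaluate to
 * the tested condition while killing ftrace as a side effect when it is
 * true. A minimal usage sketch (illustrative only, not from this file):
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;
 */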
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_REGEX_LOCK(opsname)        \
69         .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
70 #else
71 #define INIT_REGEX_LOCK(opsname)
72 #endif
73
74 static struct ftrace_ops ftrace_list_end __read_mostly = {
75         .func           = ftrace_stub,
76         .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
77 };
78
79 /* ftrace_enabled is a method to turn ftrace on or off */
80 int ftrace_enabled __read_mostly;
81 static int last_ftrace_enabled;
82
83 /* Quick disabling of function tracer. */
84 int function_trace_stop __read_mostly;
85
86 /* Current function tracing op */
87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
88
89 /* List for set_ftrace_pid's pids. */
90 LIST_HEAD(ftrace_pids);
91 struct ftrace_pid {
92         struct list_head list;
93         struct pid *pid;
94 };
95
96 /*
97  * ftrace_disabled is set when an anomaly is discovered.
98  * ftrace_disabled is much stronger than ftrace_enabled.
99  */
100 static int ftrace_disabled __read_mostly;
101
102 static DEFINE_MUTEX(ftrace_lock);
103
104 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
105 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
106 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
107 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
108 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
109 static struct ftrace_ops global_ops;
110 static struct ftrace_ops control_ops;
111
112 #if ARCH_SUPPORTS_FTRACE_OPS
113 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
114                                  struct ftrace_ops *op, struct pt_regs *regs);
115 #else
116 /* See comment below, where ftrace_ops_list_func is defined */
117 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
118 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
119 #endif
120
121 /*
122  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
123  * can use rcu_dereference_raw_notrace() is that elements removed from this list
124  * are simply leaked, so there is no need to interact with a grace-period
125  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
126  * concurrent insertions into the ftrace_global_list.
127  *
128  * Silly Alpha and silly pointer-speculation compiler optimizations!
129  */
130 #define do_for_each_ftrace_op(op, list)                 \
131         op = rcu_dereference_raw_notrace(list);                 \
132         do
133
134 /*
135  * Optimized for just a single item in the list (as that is the normal case).
136  */
137 #define while_for_each_ftrace_op(op)                            \
138         while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
139                unlikely((op) != &ftrace_list_end))
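/*
 * A minimal usage sketch of this pair (mirroring ftrace_global_list_func()
 * below; illustrative only):
 *
 *	do_for_each_ftrace_op(op, ftrace_global_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */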
140
141 static inline void ftrace_ops_init(struct ftrace_ops *ops)
142 {
143 #ifdef CONFIG_DYNAMIC_FTRACE
144         if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
145                 mutex_init(&ops->regex_lock);
146                 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
147         }
148 #endif
149 }
150
151 /**
152  * ftrace_nr_registered_ops - return number of ops registered
153  *
154  * Returns the number of ftrace_ops registered and tracing functions
155  */
156 int ftrace_nr_registered_ops(void)
157 {
158         struct ftrace_ops *ops;
159         int cnt = 0;
160
161         mutex_lock(&ftrace_lock);
162
163         for (ops = ftrace_ops_list;
164              ops != &ftrace_list_end; ops = ops->next)
165                 cnt++;
166
167         mutex_unlock(&ftrace_lock);
168
169         return cnt;
170 }
171
172 static void
173 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
174                         struct ftrace_ops *op, struct pt_regs *regs)
175 {
176         int bit;
177
178         bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
179         if (bit < 0)
180                 return;
181
182         do_for_each_ftrace_op(op, ftrace_global_list) {
183                 op->func(ip, parent_ip, op, regs);
184         } while_for_each_ftrace_op(op);
185
186         trace_clear_recursion(bit);
187 }
188
189 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
190                             struct ftrace_ops *op, struct pt_regs *regs)
191 {
192         if (!test_tsk_trace_trace(current))
193                 return;
194
195         ftrace_pid_function(ip, parent_ip, op, regs);
196 }
197
198 static void set_ftrace_pid_function(ftrace_func_t func)
199 {
200         /* do not set ftrace_pid_function to itself! */
201         if (func != ftrace_pid_func)
202                 ftrace_pid_function = func;
203 }
204
205 /**
206  * clear_ftrace_function - reset the ftrace function
207  *
208  * This NULLs the ftrace function and in essence stops
209  * tracing.  There may be a lag before every CPU stops calling the old function.
210  */
211 void clear_ftrace_function(void)
212 {
213         ftrace_trace_function = ftrace_stub;
214         ftrace_pid_function = ftrace_stub;
215 }
216
217 static void control_ops_disable_all(struct ftrace_ops *ops)
218 {
219         int cpu;
220
221         for_each_possible_cpu(cpu)
222                 *per_cpu_ptr(ops->disabled, cpu) = 1;
223 }
224
225 static int control_ops_alloc(struct ftrace_ops *ops)
226 {
227         int __percpu *disabled;
228
229         disabled = alloc_percpu(int);
230         if (!disabled)
231                 return -ENOMEM;
232
233         ops->disabled = disabled;
234         control_ops_disable_all(ops);
235         return 0;
236 }
237
238 static void control_ops_free(struct ftrace_ops *ops)
239 {
240         free_percpu(ops->disabled);
241 }
242
243 static void update_global_ops(void)
244 {
245         ftrace_func_t func;
246
247         /*
248          * If there's only one function registered, then call that
249          * function directly. Otherwise, we need to iterate over the
250          * registered callers.
251          */
252         if (ftrace_global_list == &ftrace_list_end ||
253             ftrace_global_list->next == &ftrace_list_end) {
254                 func = ftrace_global_list->func;
255                 /*
256                  * As we are calling the function directly,
257                  * if it does not have recursion protection,
258                  * the function_trace_op needs to be updated
259                  * accordingly.
260                  */
261                 if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
262                         global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
263                 else
264                         global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
265         } else {
266                 func = ftrace_global_list_func;
267                 /* The list has its own recursion protection. */
268                 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
269         }
270
271
272         /* If we filter on pids, update to use the pid function */
273         if (!list_empty(&ftrace_pids)) {
274                 set_ftrace_pid_function(func);
275                 func = ftrace_pid_func;
276         }
277
278         global_ops.func = func;
279 }
280
281 static void update_ftrace_function(void)
282 {
283         ftrace_func_t func;
284
285         update_global_ops();
286
287         /*
288          * If we are at the end of the list and this ops is
289          * recursion safe and not dynamic and the arch supports passing ops,
290          * then have the mcount trampoline call the function directly.
291          */
292         if (ftrace_ops_list == &ftrace_list_end ||
293             (ftrace_ops_list->next == &ftrace_list_end &&
294              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
295              (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
296              !FTRACE_FORCE_LIST_FUNC)) {
297                 /* Set the ftrace_ops that the arch callback uses */
298                 if (ftrace_ops_list == &global_ops)
299                         function_trace_op = ftrace_global_list;
300                 else
301                         function_trace_op = ftrace_ops_list;
302                 func = ftrace_ops_list->func;
303         } else {
304                 /* Just use the default ftrace_ops */
305                 function_trace_op = &ftrace_list_end;
306                 func = ftrace_ops_list_func;
307         }
308
309         ftrace_trace_function = func;
310 }
311
312 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
313 {
314         ops->next = *list;
315         /*
316          * We are entering ops into the list but another
317          * CPU might be walking that list. We need to make sure
318          * the ops->next pointer is valid before another CPU sees
319                  * the ops pointer inserted into the list.
320          */
321         rcu_assign_pointer(*list, ops);
322 }
323
324 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
325 {
326         struct ftrace_ops **p;
327
328         /*
329          * If we are removing the last function, then simply point
330          * to the ftrace_stub.
331          */
332         if (*list == ops && ops->next == &ftrace_list_end) {
333                 *list = &ftrace_list_end;
334                 return 0;
335         }
336
337         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
338                 if (*p == ops)
339                         break;
340
341         if (*p != ops)
342                 return -1;
343
344         *p = (*p)->next;
345         return 0;
346 }
347
348 static void add_ftrace_list_ops(struct ftrace_ops **list,
349                                 struct ftrace_ops *main_ops,
350                                 struct ftrace_ops *ops)
351 {
352         int first = *list == &ftrace_list_end;
353         add_ftrace_ops(list, ops);
354         if (first)
355                 add_ftrace_ops(&ftrace_ops_list, main_ops);
356 }
357
358 static int remove_ftrace_list_ops(struct ftrace_ops **list,
359                                   struct ftrace_ops *main_ops,
360                                   struct ftrace_ops *ops)
361 {
362         int ret = remove_ftrace_ops(list, ops);
363         if (!ret && *list == &ftrace_list_end)
364                 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
365         return ret;
366 }
367
368 static int __register_ftrace_function(struct ftrace_ops *ops)
369 {
370         if (unlikely(ftrace_disabled))
371                 return -ENODEV;
372
373         if (FTRACE_WARN_ON(ops == &global_ops))
374                 return -EINVAL;
375
376         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
377                 return -EBUSY;
378
379         /* We don't support both control and global flags set. */
380         if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
381                 return -EINVAL;
382
383 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
384         /*
385  * If the ftrace_ops specifies SAVE_REGS, then it can only be used
386          * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
387          * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
388          */
389         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
390             !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
391                 return -EINVAL;
392
393         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
394                 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
395 #endif
396
397         if (!core_kernel_data((unsigned long)ops))
398                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
399
400         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
401                 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
402                 ops->flags |= FTRACE_OPS_FL_ENABLED;
403         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
404                 if (control_ops_alloc(ops))
405                         return -ENOMEM;
406                 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
407         } else
408                 add_ftrace_ops(&ftrace_ops_list, ops);
409
410         if (ftrace_enabled)
411                 update_ftrace_function();
412
413         return 0;
414 }
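/*
 * A hedged sketch of how this path is typically reached from a client
 * through the public register_ftrace_function() API (the callback and
 * ops names below are hypothetical, for illustration only):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		(called for every function this ops traces)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */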
415
416 static void ftrace_sync(struct work_struct *work)
417 {
418         /*
419          * This function is just a stub to implement a hard force
420          * of synchronize_sched(). This requires synchronizing
421          * tasks even in userspace and idle.
422          *
423          * Yes, function tracing is rude.
424          */
425 }
426
427 static int __unregister_ftrace_function(struct ftrace_ops *ops)
428 {
429         int ret;
430
431         if (ftrace_disabled)
432                 return -ENODEV;
433
434         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
435                 return -EBUSY;
436
437         if (FTRACE_WARN_ON(ops == &global_ops))
438                 return -EINVAL;
439
440         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
441                 ret = remove_ftrace_list_ops(&ftrace_global_list,
442                                              &global_ops, ops);
443                 if (!ret)
444                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
445         } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
446                 ret = remove_ftrace_list_ops(&ftrace_control_list,
447                                              &control_ops, ops);
448                 if (!ret) {
449                         /*
450                          * The ftrace_ops is now removed from the list,
451                          * so there'll be no new users. We must ensure
452                          * all current users are done before we free
453                          * the control data.
454                          * Note synchronize_sched() is not enough, as we
455                          * use preempt_disable() to do RCU, but the function
456                          * tracer can be called where RCU is not active
457                          * (before user_exit()).
458                          */
459                         schedule_on_each_cpu(ftrace_sync);
460                         control_ops_free(ops);
461                 }
462         } else
463                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
464
465         if (ret < 0)
466                 return ret;
467
468         if (ftrace_enabled)
469                 update_ftrace_function();
470
471         /*
472          * Dynamic ops may be freed, we must make sure that all
473          * callers are done before leaving this function.
474          *
475          * Again, normal synchronize_sched() is not good enough.
476          * We need to do a hard force of sched synchronization.
477          */
478         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
479                 schedule_on_each_cpu(ftrace_sync);
480
481
482         return 0;
483 }
484
485 static void ftrace_update_pid_func(void)
486 {
487         /* Only do something if we are tracing something */
488         if (ftrace_trace_function == ftrace_stub)
489                 return;
490
491         update_ftrace_function();
492 }
493
494 #ifdef CONFIG_FUNCTION_PROFILER
495 struct ftrace_profile {
496         struct hlist_node               node;
497         unsigned long                   ip;
498         unsigned long                   counter;
499 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
500         unsigned long long              time;
501         unsigned long long              time_squared;
502 #endif
503 };
504
505 struct ftrace_profile_page {
506         struct ftrace_profile_page      *next;
507         unsigned long                   index;
508         struct ftrace_profile           records[];
509 };
510
511 struct ftrace_profile_stat {
512         atomic_t                        disabled;
513         struct hlist_head               *hash;
514         struct ftrace_profile_page      *pages;
515         struct ftrace_profile_page      *start;
516         struct tracer_stat              stat;
517 };
518
519 #define PROFILE_RECORDS_SIZE                                            \
520         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
521
522 #define PROFILES_PER_PAGE                                       \
523         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
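/*
 * Rough capacity math (hedged; assumes a 64-bit build, 4 KiB pages and
 * CONFIG_FUNCTION_GRAPH_TRACER): sizeof(struct ftrace_profile) is 48
 * bytes (a two-pointer hlist_node plus ip, counter, time and
 * time_squared), and the page header (next + index) is 16 bytes, so
 * PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85 records per page.
 */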
524
525 static int ftrace_profile_enabled __read_mostly;
526
527 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
528 static DEFINE_MUTEX(ftrace_profile_lock);
529
530 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
531
532 #define FTRACE_PROFILE_HASH_BITS 10
533 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
534
535 static void *
536 function_stat_next(void *v, int idx)
537 {
538         struct ftrace_profile *rec = v;
539         struct ftrace_profile_page *pg;
540
541         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
542
543  again:
544         if (idx != 0)
545                 rec++;
546
547         if ((void *)rec >= (void *)&pg->records[pg->index]) {
548                 pg = pg->next;
549                 if (!pg)
550                         return NULL;
551                 rec = &pg->records[0];
552                 if (!rec->counter)
553                         goto again;
554         }
555
556         return rec;
557 }
558
559 static void *function_stat_start(struct tracer_stat *trace)
560 {
561         struct ftrace_profile_stat *stat =
562                 container_of(trace, struct ftrace_profile_stat, stat);
563
564         if (!stat || !stat->start)
565                 return NULL;
566
567         return function_stat_next(&stat->start->records[0], 0);
568 }
569
570 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
571 /* function graph compares on total time */
572 static int function_stat_cmp(void *p1, void *p2)
573 {
574         struct ftrace_profile *a = p1;
575         struct ftrace_profile *b = p2;
576
577         if (a->time < b->time)
578                 return -1;
579         if (a->time > b->time)
580                 return 1;
581         else
582                 return 0;
583 }
584 #else
585 /* not function graph compares against hits */
586 static int function_stat_cmp(void *p1, void *p2)
587 {
588         struct ftrace_profile *a = p1;
589         struct ftrace_profile *b = p2;
590
591         if (a->counter < b->counter)
592                 return -1;
593         if (a->counter > b->counter)
594                 return 1;
595         else
596                 return 0;
597 }
598 #endif
599
600 static int function_stat_headers(struct seq_file *m)
601 {
602 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
603         seq_printf(m, "  Function                               "
604                    "Hit    Time            Avg             s^2\n"
605                       "  --------                               "
606                    "---    ----            ---             ---\n");
607 #else
608         seq_printf(m, "  Function                               Hit\n"
609                       "  --------                               ---\n");
610 #endif
611         return 0;
612 }
613
614 static int function_stat_show(struct seq_file *m, void *v)
615 {
616         struct ftrace_profile *rec = v;
617         char str[KSYM_SYMBOL_LEN];
618         int ret = 0;
619 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
620         static struct trace_seq s;
621         unsigned long long avg;
622         unsigned long long stddev;
623 #endif
624         mutex_lock(&ftrace_profile_lock);
625
626         /* we raced with function_profile_reset() */
627         if (unlikely(rec->counter == 0)) {
628                 ret = -EBUSY;
629                 goto out;
630         }
631
632         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
633         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
634
635 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
636         seq_printf(m, "    ");
637         avg = rec->time;
638         do_div(avg, rec->counter);
639
640         /* Sample variance (s^2) */
641         if (rec->counter <= 1)
642                 stddev = 0;
643         else {
644                 /*
645                  * Apply Welford's method:
646                  * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
647                  */
648                 stddev = rec->counter * rec->time_squared -
649                          rec->time * rec->time;
650
651                 /*
652                  * Divide only by 1000 for ns^2 -> us^2 conversion;
653                  * trace_print_graph_duration will divide by 1000 again.
654                  * Use div64_ul(): do_div() truncates its divisor to
655                  * 32 bits, which this product can overflow.
656                  */
657                 stddev = div64_ul(stddev,
658                                   rec->counter * (rec->counter - 1) * 1000);
656         }
657
658         trace_seq_init(&s);
659         trace_print_graph_duration(rec->time, &s);
660         trace_seq_puts(&s, "    ");
661         trace_print_graph_duration(avg, &s);
662         trace_seq_puts(&s, "    ");
663         trace_print_graph_duration(stddev, &s);
664         trace_print_seq(m, &s);
665 #endif
666         seq_putc(m, '\n');
667 out:
668         mutex_unlock(&ftrace_profile_lock);
669
670         return ret;
671 }
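/*
 * Worked example of the variance math above (illustrative): two hits
 * taking 100ns and 300ns give time = 400 and time_squared = 100000, so
 * n * \Sum(x_i)^2 - (\Sum x_i)^2 = 200000 - 160000 = 40000, and dividing
 * by n * (n - 1) = 2 yields the sample variance s^2 = 20000 ns^2 (the
 * extra factor of 1000 in the code only converts the printed units).
 */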
672
673 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
674 {
675         struct ftrace_profile_page *pg;
676
677         pg = stat->pages = stat->start;
678
679         while (pg) {
680                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
681                 pg->index = 0;
682                 pg = pg->next;
683         }
684
685         memset(stat->hash, 0,
686                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
687 }
688
689 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
690 {
691         struct ftrace_profile_page *pg;
692         int functions;
693         int pages;
694         int i;
695
696         /* If we already allocated, do nothing */
697         if (stat->pages)
698                 return 0;
699
700         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
701         if (!stat->pages)
702                 return -ENOMEM;
703
704 #ifdef CONFIG_DYNAMIC_FTRACE
705         functions = ftrace_update_tot_cnt;
706 #else
707         /*
708          * We do not know the number of functions that exist because
709  * dynamic tracing is what counts them. From past experience
710          * we have around 20K functions. That should be more than enough.
711          * It is highly unlikely we will execute every function in
712          * the kernel.
713          */
714         functions = 20000;
715 #endif
716
717         pg = stat->start = stat->pages;
718
719         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
720
721         for (i = 1; i < pages; i++) {
722                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
723                 if (!pg->next)
724                         goto out_free;
725                 pg = pg->next;
726         }
727
728         return 0;
729
730  out_free:
731         pg = stat->start;
732         while (pg) {
733                 unsigned long tmp = (unsigned long)pg;
734
735                 pg = pg->next;
736                 free_page(tmp);
737         }
738
739         stat->pages = NULL;
740         stat->start = NULL;
741
742         return -ENOMEM;
743 }
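/*
 * Back-of-the-envelope sizing (hedged; assumes the ~85 records per
 * 4 KiB page estimated earlier): with the 20000-function fallback above,
 * this allocates DIV_ROUND_UP(20000, 85) = 236 pages, i.e. roughly
 * 944 KiB of profile records per CPU.
 */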
744
745 static int ftrace_profile_init_cpu(int cpu)
746 {
747         struct ftrace_profile_stat *stat;
748         int size;
749
750         stat = &per_cpu(ftrace_profile_stats, cpu);
751
752         if (stat->hash) {
753                 /* If the profile is already created, simply reset it */
754                 ftrace_profile_reset(stat);
755                 return 0;
756         }
757
758         /*
759          * We are profiling all functions, but usually only a few thousand
760          * functions are hit. We'll make a hash of 1024 items.
761          */
762         size = FTRACE_PROFILE_HASH_SIZE;
763
764         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
765
766         if (!stat->hash)
767                 return -ENOMEM;
768
769         /* Preallocate the function profiling pages */
770         if (ftrace_profile_pages_init(stat) < 0) {
771                 kfree(stat->hash);
772                 stat->hash = NULL;
773                 return -ENOMEM;
774         }
775
776         return 0;
777 }
778
779 static int ftrace_profile_init(void)
780 {
781         int cpu;
782         int ret = 0;
783
784         for_each_online_cpu(cpu) {
785                 ret = ftrace_profile_init_cpu(cpu);
786                 if (ret)
787                         break;
788         }
789
790         return ret;
791 }
792
793 /* interrupts must be disabled */
794 static struct ftrace_profile *
795 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
796 {
797         struct ftrace_profile *rec;
798         struct hlist_head *hhd;
799         unsigned long key;
800
801         key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
802         hhd = &stat->hash[key];
803
804         if (hlist_empty(hhd))
805                 return NULL;
806
807         hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
808                 if (rec->ip == ip)
809                         return rec;
810         }
811
812         return NULL;
813 }
814
815 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
816                                struct ftrace_profile *rec)
817 {
818         unsigned long key;
819
820         key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
821         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
822 }
823
824 /*
825  * The memory is already allocated; this simply finds a new record to use.
826  */
827 static struct ftrace_profile *
828 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
829 {
830         struct ftrace_profile *rec = NULL;
831
832         /* prevent recursion (from NMIs) */
833         if (atomic_inc_return(&stat->disabled) != 1)
834                 goto out;
835
836         /*
837          * Try to find the function again since an NMI
838          * could have added it
839          */
840         rec = ftrace_find_profiled_func(stat, ip);
841         if (rec)
842                 goto out;
843
844         if (stat->pages->index == PROFILES_PER_PAGE) {
845                 if (!stat->pages->next)
846                         goto out;
847                 stat->pages = stat->pages->next;
848         }
849
850         rec = &stat->pages->records[stat->pages->index++];
851         rec->ip = ip;
852         ftrace_add_profile(stat, rec);
853
854  out:
855         atomic_dec(&stat->disabled);
856
857         return rec;
858 }
859
860 static void
861 function_profile_call(unsigned long ip, unsigned long parent_ip,
862                       struct ftrace_ops *ops, struct pt_regs *regs)
863 {
864         struct ftrace_profile_stat *stat;
865         struct ftrace_profile *rec;
866         unsigned long flags;
867
868         if (!ftrace_profile_enabled)
869                 return;
870
871         local_irq_save(flags);
872
873         stat = &__get_cpu_var(ftrace_profile_stats);
874         if (!stat->hash || !ftrace_profile_enabled)
875                 goto out;
876
877         rec = ftrace_find_profiled_func(stat, ip);
878         if (!rec) {
879                 rec = ftrace_profile_alloc(stat, ip);
880                 if (!rec)
881                         goto out;
882         }
883
884         rec->counter++;
885  out:
886         local_irq_restore(flags);
887 }
888
889 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
890 static int profile_graph_entry(struct ftrace_graph_ent *trace)
891 {
892         function_profile_call(trace->func, 0, NULL, NULL);
893         return 1;
894 }
895
896 static void profile_graph_return(struct ftrace_graph_ret *trace)
897 {
898         struct ftrace_profile_stat *stat;
899         unsigned long long calltime;
900         struct ftrace_profile *rec;
901         unsigned long flags;
902
903         local_irq_save(flags);
904         stat = &__get_cpu_var(ftrace_profile_stats);
905         if (!stat->hash || !ftrace_profile_enabled)
906                 goto out;
907
908         /* If the calltime was zero'd, ignore it */
909         if (!trace->calltime)
910                 goto out;
911
912         calltime = trace->rettime - trace->calltime;
913
914         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
915                 int index;
916
917                 index = trace->depth;
918
919                 /* Append this call time to the parent time to subtract */
920                 if (index)
921                         current->ret_stack[index - 1].subtime += calltime;
922
923                 if (current->ret_stack[index].subtime < calltime)
924                         calltime -= current->ret_stack[index].subtime;
925                 else
926                         calltime = 0;
927         }
928
929         rec = ftrace_find_profiled_func(stat, trace->func);
930         if (rec) {
931                 rec->time += calltime;
932                 rec->time_squared += calltime * calltime;
933         }
934
935  out:
936         local_irq_restore(flags);
937 }
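/*
 * Example of the subtime bookkeeping above (illustrative, with
 * TRACE_ITER_GRAPH_TIME cleared): if a parent at depth 0 runs 500ns
 * total and its child at depth 1 runs 200ns, the child's return adds
 * 200 to ret_stack[0].subtime, so the parent's return records
 * 500 - 200 = 300ns of self time.
 */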
938
939 static int register_ftrace_profiler(void)
940 {
941         return register_ftrace_graph(&profile_graph_return,
942                                      &profile_graph_entry);
943 }
944
945 static void unregister_ftrace_profiler(void)
946 {
947         unregister_ftrace_graph();
948 }
949 #else
950 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
951         .func           = function_profile_call,
952         .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
953         INIT_REGEX_LOCK(ftrace_profile_ops)
954 };
955
956 static int register_ftrace_profiler(void)
957 {
958         return register_ftrace_function(&ftrace_profile_ops);
959 }
960
961 static void unregister_ftrace_profiler(void)
962 {
963         unregister_ftrace_function(&ftrace_profile_ops);
964 }
965 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
966
967 static ssize_t
968 ftrace_profile_write(struct file *filp, const char __user *ubuf,
969                      size_t cnt, loff_t *ppos)
970 {
971         unsigned long val;
972         int ret;
973
974         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
975         if (ret)
976                 return ret;
977
978         val = !!val;
979
980         mutex_lock(&ftrace_profile_lock);
981         if (ftrace_profile_enabled ^ val) {
982                 if (val) {
983                         ret = ftrace_profile_init();
984                         if (ret < 0) {
985                                 cnt = ret;
986                                 goto out;
987                         }
988
989                         ret = register_ftrace_profiler();
990                         if (ret < 0) {
991                                 cnt = ret;
992                                 goto out;
993                         }
994                         ftrace_profile_enabled = 1;
995                 } else {
996                         ftrace_profile_enabled = 0;
997                         /*
998                          * unregister_ftrace_profiler calls stop_machine
999                          * so this acts like a synchronize_sched().
1000                          */
1001                         unregister_ftrace_profiler();
1002                 }
1003         }
1004  out:
1005         mutex_unlock(&ftrace_profile_lock);
1006
1007         *ppos += cnt;
1008
1009         return cnt;
1010 }
1011
1012 static ssize_t
1013 ftrace_profile_read(struct file *filp, char __user *ubuf,
1014                      size_t cnt, loff_t *ppos)
1015 {
1016         char buf[64];           /* big enough to hold a number */
1017         int r;
1018
1019         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1020         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1021 }
1022
1023 static const struct file_operations ftrace_profile_fops = {
1024         .open           = tracing_open_generic,
1025         .read           = ftrace_profile_read,
1026         .write          = ftrace_profile_write,
1027         .llseek         = default_llseek,
1028 };
1029
1030 /* used to initialize the real stat files */
1031 static struct tracer_stat function_stats __initdata = {
1032         .name           = "functions",
1033         .stat_start     = function_stat_start,
1034         .stat_next      = function_stat_next,
1035         .stat_cmp       = function_stat_cmp,
1036         .stat_headers   = function_stat_headers,
1037         .stat_show      = function_stat_show
1038 };
1039
1040 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1041 {
1042         struct ftrace_profile_stat *stat;
1043         struct dentry *entry;
1044         char *name;
1045         int ret;
1046         int cpu;
1047
1048         for_each_possible_cpu(cpu) {
1049                 stat = &per_cpu(ftrace_profile_stats, cpu);
1050
1051                 /* allocate enough for function name + cpu number */
1052                 name = kmalloc(32, GFP_KERNEL);
1053                 if (!name) {
1054                         /*
1055                          * The files created are permanent; even if something
1056                          * fails here, we do not free their memory.
1057                          */
1058                         WARN(1,
1059                              "Could not allocate stat file for cpu %d\n",
1060                              cpu);
1061                         return;
1062                 }
1063                 stat->stat = function_stats;
1064                 snprintf(name, 32, "function%d", cpu);
1065                 stat->stat.name = name;
1066                 ret = register_stat_tracer(&stat->stat);
1067                 if (ret) {
1068                         WARN(1,
1069                              "Could not register function stat for cpu %d\n",
1070                              cpu);
1071                         kfree(name);
1072                         return;
1073                 }
1074         }
1075
1076         entry = debugfs_create_file("function_profile_enabled", 0644,
1077                                     d_tracer, NULL, &ftrace_profile_fops);
1078         if (!entry)
1079                 pr_warning("Could not create debugfs "
1080                            "'function_profile_enabled' entry\n");
1081 }
1082
1083 #else /* CONFIG_FUNCTION_PROFILER */
1084 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1085 {
1086 }
1087 #endif /* CONFIG_FUNCTION_PROFILER */
1088
1089 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1090
1091 loff_t
1092 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1093 {
1094         loff_t ret;
1095
1096         if (file->f_mode & FMODE_READ)
1097                 ret = seq_lseek(file, offset, whence);
1098         else
1099                 file->f_pos = ret = 1;
1100
1101         return ret;
1102 }
1103
1104 #ifdef CONFIG_DYNAMIC_FTRACE
1105
1106 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1107 # error Dynamic ftrace depends on MCOUNT_RECORD
1108 #endif
1109
1110 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1111
1112 struct ftrace_func_probe {
1113         struct hlist_node       node;
1114         struct ftrace_probe_ops *ops;
1115         unsigned long           flags;
1116         unsigned long           ip;
1117         void                    *data;
1118         struct list_head        free_list;
1119 };
1120
1121 struct ftrace_func_entry {
1122         struct hlist_node hlist;
1123         unsigned long ip;
1124 };
1125
1126 struct ftrace_hash {
1127         unsigned long           size_bits;
1128         struct hlist_head       *buckets;
1129         unsigned long           count;
1130         struct rcu_head         rcu;
1131 };
1132
1133 /*
1134  * We make these constant because no one should touch them,
1135  * but they are used as the default "empty hash", to avoid allocating
1136  * it all the time. These are in a read only section such that if
1137  * anyone does try to modify it, it will cause an exception.
1138  */
1139 static const struct hlist_head empty_buckets[1];
1140 static const struct ftrace_hash empty_hash = {
1141         .buckets = (struct hlist_head *)empty_buckets,
1142 };
1143 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
1144
1145 static struct ftrace_ops global_ops = {
1146         .func                   = ftrace_stub,
1147         .notrace_hash           = EMPTY_HASH,
1148         .filter_hash            = EMPTY_HASH,
1149         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1150         INIT_REGEX_LOCK(global_ops)
1151 };
1152
1153 struct ftrace_page {
1154         struct ftrace_page      *next;
1155         struct dyn_ftrace       *records;
1156         int                     index;
1157         int                     size;
1158 };
1159
1160 static struct ftrace_page *ftrace_new_pgs;
1161
1162 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1163 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1164
1165 /* estimate from running different kernels */
1166 #define NR_TO_INIT              10000
1167
1168 static struct ftrace_page       *ftrace_pages_start;
1169 static struct ftrace_page       *ftrace_pages;
1170
1171 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1172 {
1173         return !hash || !hash->count;
1174 }
1175
1176 static struct ftrace_func_entry *
1177 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1178 {
1179         unsigned long key;
1180         struct ftrace_func_entry *entry;
1181         struct hlist_head *hhd;
1182
1183         if (ftrace_hash_empty(hash))
1184                 return NULL;
1185
1186         if (hash->size_bits > 0)
1187                 key = hash_long(ip, hash->size_bits);
1188         else
1189                 key = 0;
1190
1191         hhd = &hash->buckets[key];
1192
1193         hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1194                 if (entry->ip == ip)
1195                         return entry;
1196         }
1197         return NULL;
1198 }
1199
1200 static void __add_hash_entry(struct ftrace_hash *hash,
1201                              struct ftrace_func_entry *entry)
1202 {
1203         struct hlist_head *hhd;
1204         unsigned long key;
1205
1206         if (hash->size_bits)
1207                 key = hash_long(entry->ip, hash->size_bits);
1208         else
1209                 key = 0;
1210
1211         hhd = &hash->buckets[key];
1212         hlist_add_head(&entry->hlist, hhd);
1213         hash->count++;
1214 }
1215
1216 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1217 {
1218         struct ftrace_func_entry *entry;
1219
1220         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1221         if (!entry)
1222                 return -ENOMEM;
1223
1224         entry->ip = ip;
1225         __add_hash_entry(hash, entry);
1226
1227         return 0;
1228 }
1229
1230 static void
1231 free_hash_entry(struct ftrace_hash *hash,
1232                   struct ftrace_func_entry *entry)
1233 {
1234         hlist_del(&entry->hlist);
1235         kfree(entry);
1236         hash->count--;
1237 }
1238
1239 static void
1240 remove_hash_entry(struct ftrace_hash *hash,
1241                   struct ftrace_func_entry *entry)
1242 {
1243         hlist_del(&entry->hlist);
1244         hash->count--;
1245 }
1246
1247 static void ftrace_hash_clear(struct ftrace_hash *hash)
1248 {
1249         struct hlist_head *hhd;
1250         struct hlist_node *tn;
1251         struct ftrace_func_entry *entry;
1252         int size = 1 << hash->size_bits;
1253         int i;
1254
1255         if (!hash->count)
1256                 return;
1257
1258         for (i = 0; i < size; i++) {
1259                 hhd = &hash->buckets[i];
1260                 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1261                         free_hash_entry(hash, entry);
1262         }
1263         FTRACE_WARN_ON(hash->count);
1264 }
1265
1266 static void free_ftrace_hash(struct ftrace_hash *hash)
1267 {
1268         if (!hash || hash == EMPTY_HASH)
1269                 return;
1270         ftrace_hash_clear(hash);
1271         kfree(hash->buckets);
1272         kfree(hash);
1273 }
1274
1275 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1276 {
1277         struct ftrace_hash *hash;
1278
1279         hash = container_of(rcu, struct ftrace_hash, rcu);
1280         free_ftrace_hash(hash);
1281 }
1282
1283 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1284 {
1285         if (!hash || hash == EMPTY_HASH)
1286                 return;
1287         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1288 }
1289
1290 void ftrace_free_filter(struct ftrace_ops *ops)
1291 {
1292         ftrace_ops_init(ops);
1293         free_ftrace_hash(ops->filter_hash);
1294         free_ftrace_hash(ops->notrace_hash);
1295 }
1296
1297 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1298 {
1299         struct ftrace_hash *hash;
1300         int size;
1301
1302         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1303         if (!hash)
1304                 return NULL;
1305
1306         size = 1 << size_bits;
1307         hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1308
1309         if (!hash->buckets) {
1310                 kfree(hash);
1311                 return NULL;
1312         }
1313
1314         hash->size_bits = size_bits;
1315
1316         return hash;
1317 }
1318
1319 static struct ftrace_hash *
1320 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1321 {
1322         struct ftrace_func_entry *entry;
1323         struct ftrace_hash *new_hash;
1324         int size;
1325         int ret;
1326         int i;
1327
1328         new_hash = alloc_ftrace_hash(size_bits);
1329         if (!new_hash)
1330                 return NULL;
1331
1332         /* Empty hash? */
1333         if (ftrace_hash_empty(hash))
1334                 return new_hash;
1335
1336         size = 1 << hash->size_bits;
1337         for (i = 0; i < size; i++) {
1338                 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1339                         ret = add_hash_entry(new_hash, entry->ip);
1340                         if (ret < 0)
1341                                 goto free_hash;
1342                 }
1343         }
1344
1345         FTRACE_WARN_ON(new_hash->count != hash->count);
1346
1347         return new_hash;
1348
1349  free_hash:
1350         free_ftrace_hash(new_hash);
1351         return NULL;
1352 }
1353
1354 static void
1355 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1356 static void
1357 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1358
1359 static int
1360 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1361                  struct ftrace_hash **dst, struct ftrace_hash *src)
1362 {
1363         struct ftrace_func_entry *entry;
1364         struct hlist_node *tn;
1365         struct hlist_head *hhd;
1366         struct ftrace_hash *old_hash;
1367         struct ftrace_hash *new_hash;
1368         int size = src->count;
1369         int bits = 0;
1370         int ret;
1371         int i;
1372
1373         /*
1374          * Remove the current set, update the hash and add
1375          * them back.
1376          */
1377         ftrace_hash_rec_disable(ops, enable);
1378
1379         /*
1380          * If the new source is empty, just free dst and assign it
1381          * the empty_hash.
1382          */
1383         if (!src->count) {
1384                 free_ftrace_hash_rcu(*dst);
1385                 rcu_assign_pointer(*dst, EMPTY_HASH);
1386                 /* still need to update the function records */
1387                 ret = 0;
1388                 goto out;
1389         }
1390
1391         /*
1392          * Make the hash size about 1/2 the # found
1393          */
1394         for (size /= 2; size; size >>= 1)
1395                 bits++;
1396
1397         /* Don't allocate too much */
1398         if (bits > FTRACE_HASH_MAX_BITS)
1399                 bits = FTRACE_HASH_MAX_BITS;
1400
1401         ret = -ENOMEM;
1402         new_hash = alloc_ftrace_hash(bits);
1403         if (!new_hash)
1404                 goto out;
1405
1406         size = 1 << src->size_bits;
1407         for (i = 0; i < size; i++) {
1408                 hhd = &src->buckets[i];
1409                 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1410                         remove_hash_entry(src, entry);
1411                         __add_hash_entry(new_hash, entry);
1412                 }
1413         }
1414
1415         old_hash = *dst;
1416         rcu_assign_pointer(*dst, new_hash);
1417         free_ftrace_hash_rcu(old_hash);
1418
1419         ret = 0;
1420  out:
1421         /*
1422          * Enable regardless of ret:
1423          *  On success, we enable the new hash.
1424          *  On failure, we re-enable the original hash.
1425          */
1426         ftrace_hash_rec_enable(ops, enable);
1427
1428         return ret;
1429 }
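/*
 * Sizing example for the bit-counting loop above (illustrative): for
 * src->count = 100, size starts at 50 and is shifted right six times
 * before reaching zero, so bits = 6 and the new hash gets 1 << 6 = 64
 * buckets -- about half the number of entries, as intended.
 */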
1430
1431 /*
1432  * Test the hashes for this ops to see if we want to call
1433  * the ops->func or not.
1434  *
1435  * It's a match if the ip is in the ops->filter_hash or
1436  * the filter_hash does not exist or is empty,
1437  *  AND
1438  * the ip is not in the ops->notrace_hash.
1439  *
1440  * This needs to be called with preemption disabled as
1441  * the hashes are freed with call_rcu_sched().
1442  */
1443 static int
1444 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1445 {
1446         struct ftrace_hash *filter_hash;
1447         struct ftrace_hash *notrace_hash;
1448         int ret;
1449
1450         filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1451         notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1452
1453         if ((ftrace_hash_empty(filter_hash) ||
1454              ftrace_lookup_ip(filter_hash, ip)) &&
1455             (ftrace_hash_empty(notrace_hash) ||
1456              !ftrace_lookup_ip(notrace_hash, ip)))
1457                 ret = 1;
1458         else
1459                 ret = 0;
1460
1461         return ret;
1462 }
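/*
 * Truth table for the test above (illustrative):
 *
 *	filter_hash	notrace_hash	result
 *	empty		empty		trace (trace everything)
 *	has ip		no ip		trace (filtered in)
 *	empty		has ip		do not trace (notraced)
 *	lacks ip	(any)		do not trace (filtered out)
 */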
1463
1464 /*
1465  * This is a double for. Do not use 'break' to break out of the loop,
1466  * This is a double for. Do not use 'break' to break out of the loop;
1467  */
1468 #define do_for_each_ftrace_rec(pg, rec)                                 \
1469         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1470                 int _____i;                                             \
1471                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1472                         rec = &pg->records[_____i];
1473
1474 #define while_for_each_ftrace_rec()             \
1475                 }                               \
1476         }
1477
1478
1479 static int ftrace_cmp_recs(const void *a, const void *b)
1480 {
1481         const struct dyn_ftrace *key = a;
1482         const struct dyn_ftrace *rec = b;
1483
1484         if (key->flags < rec->ip)
1485                 return -1;
1486         if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1487                 return 1;
1488         return 0;
1489 }
1490
1491 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1492 {
1493         struct ftrace_page *pg;
1494         struct dyn_ftrace *rec;
1495         struct dyn_ftrace key;
1496
1497         key.ip = start;
1498         key.flags = end;        /* overload flags, as it is unsigned long */
1499
1500         for (pg = ftrace_pages_start; pg; pg = pg->next) {
1501                 if (end < pg->records[0].ip ||
1502                     start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1503                         continue;
1504                 rec = bsearch(&key, pg->records, pg->index,
1505                               sizeof(struct dyn_ftrace),
1506                               ftrace_cmp_recs);
1507                 if (rec)
1508                         return rec->ip;
1509         }
1510
1511         return 0;
1512 }
1513
1514 /**
1515  * ftrace_location - return true if the ip given is a traced location
1516  * @ip: the instruction pointer to check
1517  *
1518  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1519  * That is, the instruction that is either a NOP or call to
1520  * the function tracer. It checks the ftrace internal tables to
1521  * determine if the address belongs or not.
1522  */
1523 unsigned long ftrace_location(unsigned long ip)
1524 {
1525         return ftrace_location_range(ip, ip);
1526 }
1527
1528 /**
1529  * ftrace_text_reserved - return true if range contains an ftrace location
1530  * @start: start of range to search
1531  * @end: end of range to search (inclusive). @end points to the last byte to check.
1532  *
1533  * Returns 1 if the range from @start to @end contains an ftrace location.
1534  * That is, the instruction that is either a NOP or call to
1535  * the function tracer. It checks the ftrace internal tables to
1536  * determine if the address belongs or not.
1537  */
1538 int ftrace_text_reserved(void *start, void *end)
1539 {
1540         unsigned long ret;
1541
1542         ret = ftrace_location_range((unsigned long)start,
1543                                     (unsigned long)end);
1544
1545         return (int)!!ret;
1546 }
1547
1548 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1549                                      int filter_hash,
1550                                      bool inc)
1551 {
1552         struct ftrace_hash *hash;
1553         struct ftrace_hash *other_hash;
1554         struct ftrace_page *pg;
1555         struct dyn_ftrace *rec;
1556         int count = 0;
1557         int all = 0;
1558
1559         /* Only update if the ops has been registered */
1560         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1561                 return;
1562
1563         /*
1564          * In the filter_hash case:
1565          *   If the count is zero, we update all records.
1566          *   Otherwise we just update the items in the hash.
1567          *
1568          * In the notrace_hash case:
1569          *   We enable the update in the hash.
1570          *   As disabling notrace means enabling the tracing,
1571          *   and enabling notrace means disabling, the inc variable
1572          *   gets inversed.
1573  *   is inverted.
1574         if (filter_hash) {
1575                 hash = ops->filter_hash;
1576                 other_hash = ops->notrace_hash;
1577                 if (ftrace_hash_empty(hash))
1578                         all = 1;
1579         } else {
1580                 inc = !inc;
1581                 hash = ops->notrace_hash;
1582                 other_hash = ops->filter_hash;
1583                 /*
1584                  * If the notrace hash has no items,
1585                  * then there's nothing to do.
1586                  */
1587                 if (ftrace_hash_empty(hash))
1588                         return;
1589         }
1590
1591         do_for_each_ftrace_rec(pg, rec) {
1592                 int in_other_hash = 0;
1593                 int in_hash = 0;
1594                 int match = 0;
1595
1596                 if (all) {
1597                         /*
1598                          * Only the filter_hash affects all records.
1599                          * Update if the record is not in the notrace hash.
1600                          */
1601                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1602                                 match = 1;
1603                 } else {
1604                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1605                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1606
1607                         /*
1608                          * If filter_hash is set, match ips that are in
1609                          * this hash but not in the notrace hash.
1610                          * If filter_hash is not set (we are inverting),
1611                          * match ips that are in this hash and also in
1612                          * the filter hash, or the filter hash is empty.
1613                          */
1610                         if (filter_hash && in_hash && !in_other_hash)
1611                                 match = 1;
1612                         else if (!filter_hash && in_hash &&
1613                                  (in_other_hash || ftrace_hash_empty(other_hash)))
1614                                 match = 1;
1615                 }
1616                 if (!match)
1617                         continue;
1618
1619                 if (inc) {
1620                         rec->flags++;
1621                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1622                                 return;
1623                         /*
1624                          * If any ops wants regs saved for this function
1625                          * then all ops will get saved regs.
1626                          */
1627                         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1628                                 rec->flags |= FTRACE_FL_REGS;
1629                 } else {
1630                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1631                                 return;
1632                         rec->flags--;
1633                 }
1634                 count++;
1635                 /* Shortcut, if we handled all records, we are done. */
1636                 if (!all && count == hash->count)
1637                         return;
1638         } while_for_each_ftrace_rec();
1639 }
1640
1641 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1642                                     int filter_hash)
1643 {
1644         __ftrace_hash_rec_update(ops, filter_hash, 0);
1645 }
1646
1647 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1648                                    int filter_hash)
1649 {
1650         __ftrace_hash_rec_update(ops, filter_hash, 1);
1651 }
1652
1653 static void print_ip_ins(const char *fmt, unsigned char *p)
1654 {
1655         int i;
1656
1657         printk(KERN_CONT "%s", fmt);
1658
1659         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1660                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1661 }
1662
1663 /**
1664  * ftrace_bug - report and shutdown function tracer
1665  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1666  * @ip: The address that failed
1667  *
1668  * The arch code that enables or disables the function tracing
1669  * can call ftrace_bug() when it has detected a problem in
1670  * modifying the code. @failed should be one of either:
1671  * -EFAULT - if the problem happens on reading the @ip address
1672  * -EINVAL - if what is read at @ip is not what was expected
1673  * -EPERM - if the problem happens on writing to the @ip address
1674  */
1675 void ftrace_bug(int failed, unsigned long ip)
1676 {
1677         switch (failed) {
1678         case -EFAULT:
1679                 FTRACE_WARN_ON_ONCE(1);
1680                 pr_info("ftrace faulted on modifying ");
1681                 print_ip_sym(ip);
1682                 break;
1683         case -EINVAL:
1684                 FTRACE_WARN_ON_ONCE(1);
1685                 pr_info("ftrace failed to modify ");
1686                 print_ip_sym(ip);
1687                 print_ip_ins(" actual: ", (unsigned char *)ip);
1688                 printk(KERN_CONT "\n");
1689                 break;
1690         case -EPERM:
1691                 FTRACE_WARN_ON_ONCE(1);
1692                 pr_info("ftrace faulted on writing ");
1693                 print_ip_sym(ip);
1694                 break;
1695         default:
1696                 FTRACE_WARN_ON_ONCE(1);
1697                 pr_info("ftrace faulted on unknown error ");
1698                 print_ip_sym(ip);
1699         }
1700 }
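/*
 * Illustrative sketch (not part of the original file): the expected
 * calling pattern from arch code.  arch_disable_site() is a made-up
 * name; ftrace_make_nop() and ftrace_bug() are the real interfaces.
 *
 *	static void arch_disable_site(struct module *mod, struct dyn_ftrace *rec)
 *	{
 *		int ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 *
 *		if (ret)
 *			ftrace_bug(ret, rec->ip);
 *	}
 */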
1701
1702 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1703 {
1704         unsigned long flag = 0UL;
1705
1706         /*
1707          * If we are updating calls:
1708          *
1709          *   If the record has a ref count, then we need to enable it
1710          *   because someone is using it.
1711          *
1712          *   Otherwise we make sure it's disabled.
1713          *
1714          * If we are disabling calls, then disable all records that
1715          * are enabled.
1716          */
1717         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1718                 flag = FTRACE_FL_ENABLED;
1719
1720         /*
1721          * If enabling and the REGS flag does not match the REGS_EN, then
1722          * do not ignore this record. Set flags to fail the compare against
1723          * ENABLED.
1724          */
1725         if (flag &&
1726             (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1727                 flag |= FTRACE_FL_REGS;
1728
1729         /* If the state of this record hasn't changed, then do nothing */
1730         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1731                 return FTRACE_UPDATE_IGNORE;
1732
1733         if (flag) {
1734                 /* Save off if rec is being enabled (for return value) */
1735                 flag ^= rec->flags & FTRACE_FL_ENABLED;
1736
1737                 if (update) {
1738                         rec->flags |= FTRACE_FL_ENABLED;
1739                         if (flag & FTRACE_FL_REGS) {
1740                                 if (rec->flags & FTRACE_FL_REGS)
1741                                         rec->flags |= FTRACE_FL_REGS_EN;
1742                                 else
1743                                         rec->flags &= ~FTRACE_FL_REGS_EN;
1744                         }
1745                 }
1746
1747                 /*
1748                  * If this record is being updated from a nop, then
1749                  *   return UPDATE_MAKE_CALL.
1750                  * Otherwise, if the EN flag is set, then return
1751                  *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1752                  *   from the non-save regs, to a save regs function.
1753                  * Otherwise,
1754                  *   return UPDATE_MODIFY_CALL to tell the caller to convert
1755                  *   from the save regs, to a non-save regs function.
1756                  */
1757                 if (flag & FTRACE_FL_ENABLED)
1758                         return FTRACE_UPDATE_MAKE_CALL;
1759                 else if (rec->flags & FTRACE_FL_REGS_EN)
1760                         return FTRACE_UPDATE_MODIFY_CALL_REGS;
1761                 else
1762                         return FTRACE_UPDATE_MODIFY_CALL;
1763         }
1764
1765         if (update) {
1766                 /* If there's no more users, clear all flags */
1767                 if (!(rec->flags & ~FTRACE_FL_MASK))
1768                         rec->flags = 0;
1769                 else
1770                         /* Just disable the record (keep REGS state) */
1771                         rec->flags &= ~FTRACE_FL_ENABLED;
1772         }
1773
1774         return FTRACE_UPDATE_MAKE_NOP;
1775 }
1776
1777 /**
1778  * ftrace_update_record - set whether a record is being traced or not
1779  * @rec: the record to update
1780  * @enable: set to 1 if the record is to be traced, zero to force disable
1781  *
1782  * The records that represent all functions that can be traced need
1783  * to be updated when tracing has been enabled.
1784  */
1785 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1786 {
1787         return ftrace_check_record(rec, enable, 1);
1788 }
1789
1790 /**
1791  * ftrace_test_record - check if the record has been enabled or not
1792  * @rec: the record to test
1793  * @enable: set to 1 to check if enabled, 0 if it is disabled
1794  *
1795  * The arch code may need to test if a record is already set to
1796  * tracing to determine how to modify the function code that it
1797  * represents.
1798  */
1799 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1800 {
1801         return ftrace_check_record(rec, enable, 0);
1802 }
1803
1804 static int
1805 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1806 {
1807         unsigned long ftrace_old_addr;
1808         unsigned long ftrace_addr;
1809         int ret;
1810
1811         ret = ftrace_update_record(rec, enable);
1812
1813         if (rec->flags & FTRACE_FL_REGS)
1814                 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1815         else
1816                 ftrace_addr = (unsigned long)FTRACE_ADDR;
1817
1818         switch (ret) {
1819         case FTRACE_UPDATE_IGNORE:
1820                 return 0;
1821
1822         case FTRACE_UPDATE_MAKE_CALL:
1823                 return ftrace_make_call(rec, ftrace_addr);
1824
1825         case FTRACE_UPDATE_MAKE_NOP:
1826                 return ftrace_make_nop(NULL, rec, ftrace_addr);
1827
1828         case FTRACE_UPDATE_MODIFY_CALL_REGS:
1829         case FTRACE_UPDATE_MODIFY_CALL:
1830                 if (rec->flags & FTRACE_FL_REGS)
1831                         ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1832                 else
1833                         ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1834
1835                 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1836         }
1837
1838         return -1; /* unknown ftrace bug */
1839 }
1840
1841 void __weak ftrace_replace_code(int enable)
1842 {
1843         struct dyn_ftrace *rec;
1844         struct ftrace_page *pg;
1845         int failed;
1846
1847         if (unlikely(ftrace_disabled))
1848                 return;
1849
1850         do_for_each_ftrace_rec(pg, rec) {
1851                 failed = __ftrace_replace_code(rec, enable);
1852                 if (failed) {
1853                         ftrace_bug(failed, rec->ip);
1854                         /* Stop processing */
1855                         return;
1856                 }
1857         } while_for_each_ftrace_rec();
1858 }
1859
1860 struct ftrace_rec_iter {
1861         struct ftrace_page      *pg;
1862         int                     index;
1863 };
1864
1865 /**
1866  * ftrace_rec_iter_start - start up iterating over traced functions
1867  *
1868  * Returns an iterator handle that is used to iterate over all
1869  * the records that represent address locations where functions
1870  * are traced.
1871  *
1872  * May return NULL if no records are available.
1873  */
1874 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1875 {
1876         /*
1877          * We only use a single iterator.
1878          * Protected by the ftrace_lock mutex.
1879          */
1880         static struct ftrace_rec_iter ftrace_rec_iter;
1881         struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1882
1883         iter->pg = ftrace_pages_start;
1884         iter->index = 0;
1885
1886         /* Could have empty pages */
1887         while (iter->pg && !iter->pg->index)
1888                 iter->pg = iter->pg->next;
1889
1890         if (!iter->pg)
1891                 return NULL;
1892
1893         return iter;
1894 }
1895
1896 /**
1897  * ftrace_rec_iter_next - get the next record to process.
1898  * @iter: The handle to the iterator.
1899  *
1900  * Returns the next iterator after the given iterator @iter.
1901  */
1902 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1903 {
1904         iter->index++;
1905
1906         if (iter->index >= iter->pg->index) {
1907                 iter->pg = iter->pg->next;
1908                 iter->index = 0;
1909
1910                 /* Could have empty pages */
1911                 while (iter->pg && !iter->pg->index)
1912                         iter->pg = iter->pg->next;
1913         }
1914
1915         if (!iter->pg)
1916                 return NULL;
1917
1918         return iter;
1919 }
1920
1921 /**
1922  * ftrace_rec_iter_record - get the record at the iterator location
1923  * @iter: The current iterator location
1924  *
1925  * Returns the record that the current @iter is at.
1926  */
1927 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1928 {
1929         return &iter->pg->records[iter->index];
1930 }
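/*
 * Illustrative sketch (not part of the original file): the intended
 * way for arch code to walk every record with the three iterator
 * functions above.  process_record() is a hypothetical helper.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		process_record(rec);
 *	}
 */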
1931
1932 static int
1933 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1934 {
1935         unsigned long ip;
1936         int ret;
1937
1938         ip = rec->ip;
1939
1940         if (unlikely(ftrace_disabled))
1941                 return 0;
1942
1943         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1944         if (ret) {
1945                 ftrace_bug(ret, ip);
1946                 return 0;
1947         }
1948         return 1;
1949 }
1950
1951 /*
1952  * archs can override this function if they must do something
1953  * before the modifying code is performed.
1954  */
1955 int __weak ftrace_arch_code_modify_prepare(void)
1956 {
1957         return 0;
1958 }
1959
1960 /*
1961  * archs can override this function if they must do something
1962  * after the modifying code is performed.
1963  */
1964 int __weak ftrace_arch_code_modify_post_process(void)
1965 {
1966         return 0;
1967 }
1968
1969 void ftrace_modify_all_code(int command)
1970 {
1971         if (command & FTRACE_UPDATE_CALLS)
1972                 ftrace_replace_code(1);
1973         else if (command & FTRACE_DISABLE_CALLS)
1974                 ftrace_replace_code(0);
1975
1976         if (command & FTRACE_UPDATE_TRACE_FUNC)
1977                 ftrace_update_ftrace_func(ftrace_trace_function);
1978
1979         if (command & FTRACE_START_FUNC_RET)
1980                 ftrace_enable_ftrace_graph_caller();
1981         else if (command & FTRACE_STOP_FUNC_RET)
1982                 ftrace_disable_ftrace_graph_caller();
1983 }
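/*
 * Illustrative sketch (not part of the original file): @command is a
 * bitmask, so several updates can be batched into a single pass.  For
 * example, enabling all call sites and switching the trace function
 * at the same time:
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */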
1984
1985 static int __ftrace_modify_code(void *data)
1986 {
1987         int *command = data;
1988
1989         ftrace_modify_all_code(*command);
1990
1991         return 0;
1992 }
1993
1994 /**
1995  * ftrace_run_stop_machine - go back to the stop machine method
1996  * @command: The command to tell ftrace what to do
1997  *
1998  * If an arch needs to fall back to the stop machine method, then
1999  * it can call this function.
2000  */
2001 void ftrace_run_stop_machine(int command)
2002 {
2003         stop_machine(__ftrace_modify_code, &command, NULL);
2004 }
2005
2006 /**
2007  * arch_ftrace_update_code - modify the code to trace or not trace
2008  * @command: The command that needs to be done
2009  *
2010  * Archs can override this function if they do not need to
2011  * run stop_machine() to modify code.
2012  */
2013 void __weak arch_ftrace_update_code(int command)
2014 {
2015         ftrace_run_stop_machine(command);
2016 }
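/*
 * Illustrative sketch (not part of the original file): an arch that
 * can patch text without stopping every CPU (x86, for instance, uses
 * a breakpoint based scheme) would provide a strong definition.  The
 * my_arch_*() helpers are hypothetical:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		my_arch_text_poke_prepare();
 *		ftrace_modify_all_code(command);
 *		my_arch_text_poke_finish();
 *	}
 */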
2017
2018 static void ftrace_run_update_code(int command)
2019 {
2020         int ret;
2021
2022         ret = ftrace_arch_code_modify_prepare();
2023         FTRACE_WARN_ON(ret);
2024         if (ret)
2025                 return;
2026         /*
2027          * Do not call function tracer while we update the code.
2028          * We are in stop machine.
2029          */
2030         function_trace_stop++;
2031
2032         /*
2033          * By default we use stop_machine() to modify the code.
2034          * But archs can do whatever they want as long as it
2035          * is safe. stop_machine() is the safest, but also
2036          * produces the most overhead.
2037          */
2038         arch_ftrace_update_code(command);
2039
2040         function_trace_stop--;
2041
2042         ret = ftrace_arch_code_modify_post_process();
2043         FTRACE_WARN_ON(ret);
2044 }
2045
2046 static ftrace_func_t saved_ftrace_func;
2047 static int ftrace_start_up;
2048 static int global_start_up;
2049
2050 static void ftrace_startup_enable(int command)
2051 {
2052         if (saved_ftrace_func != ftrace_trace_function) {
2053                 saved_ftrace_func = ftrace_trace_function;
2054                 command |= FTRACE_UPDATE_TRACE_FUNC;
2055         }
2056
2057         if (!command || !ftrace_enabled)
2058                 return;
2059
2060         ftrace_run_update_code(command);
2061 }
2062
2063 static int ftrace_startup(struct ftrace_ops *ops, int command)
2064 {
2065         bool hash_enable = true;
2066
2067         if (unlikely(ftrace_disabled))
2068                 return -ENODEV;
2069
2070         ftrace_start_up++;
2071         command |= FTRACE_UPDATE_CALLS;
2072
2073         /* ops marked global share the filter hashes */
2074         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2075                 ops = &global_ops;
2076                 /* Don't update hash if global is already set */
2077                 if (global_start_up)
2078                         hash_enable = false;
2079                 global_start_up++;
2080         }
2081
2082         ops->flags |= FTRACE_OPS_FL_ENABLED;
2083         if (hash_enable)
2084                 ftrace_hash_rec_enable(ops, 1);
2085
2086         ftrace_startup_enable(command);
2087
2088         return 0;
2089 }
2090
2091 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2092 {
2093         bool hash_disable = true;
2094
2095         if (unlikely(ftrace_disabled))
2096                 return;
2097
2098         ftrace_start_up--;
2099         /*
2100          * Just warn in case of imbalance; no need to kill ftrace. It's not
2101          * critical, but the ftrace_call callers may never be nopped again
2102          * after further ftrace uses.
2103          */
2104         WARN_ON_ONCE(ftrace_start_up < 0);
2105
2106         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2107                 ops = &global_ops;
2108                 global_start_up--;
2109                 WARN_ON_ONCE(global_start_up < 0);
2110                 /* Don't update hash if global still has users */
2111                 if (global_start_up) {
2112                         WARN_ON_ONCE(!ftrace_start_up);
2113                         hash_disable = false;
2114                 }
2115         }
2116
2117         if (hash_disable)
2118                 ftrace_hash_rec_disable(ops, 1);
2119
2120         if (ops != &global_ops || !global_start_up)
2121                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2122
2123         command |= FTRACE_UPDATE_CALLS;
2124
2125         if (saved_ftrace_func != ftrace_trace_function) {
2126                 saved_ftrace_func = ftrace_trace_function;
2127                 command |= FTRACE_UPDATE_TRACE_FUNC;
2128         }
2129
2130         if (!command || !ftrace_enabled)
2131                 return;
2132
2133         ftrace_run_update_code(command);
2134 }
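/*
 * Illustrative sketch (not part of the original file): ftrace_startup()
 * and ftrace_shutdown() are meant to be called in balanced pairs from
 * the register/unregister paths, as the probe code later in this file
 * does:
 *
 *	ret = __register_ftrace_function(ops);
 *	if (!ret)
 *		ret = ftrace_startup(ops, 0);
 *	...
 *	ret = __unregister_ftrace_function(ops);
 *	if (!ret)
 *		ftrace_shutdown(ops, 0);
 */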
2135
2136 static void ftrace_startup_sysctl(void)
2137 {
2138         if (unlikely(ftrace_disabled))
2139                 return;
2140
2141         /* Force update next time */
2142         saved_ftrace_func = NULL;
2143         /* ftrace_start_up is true if we want ftrace running */
2144         if (ftrace_start_up)
2145                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2146 }
2147
2148 static void ftrace_shutdown_sysctl(void)
2149 {
2150         if (unlikely(ftrace_disabled))
2151                 return;
2152
2153         /* ftrace_start_up is true if ftrace is running */
2154         if (ftrace_start_up)
2155                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2156 }
2157
2158 static cycle_t          ftrace_update_time;
2159 static unsigned long    ftrace_update_cnt;
2160 unsigned long           ftrace_update_tot_cnt;
2161
2162 static int ops_traces_mod(struct ftrace_ops *ops)
2163 {
2164         struct ftrace_hash *hash;
2165
2166         hash = ops->filter_hash;
2167         return ftrace_hash_empty(hash);
2168 }
2169
2170 static int ftrace_update_code(struct module *mod)
2171 {
2172         struct ftrace_page *pg;
2173         struct dyn_ftrace *p;
2174         cycle_t start, stop;
2175         unsigned long ref = 0;
2176         int i;
2177
2178         /*
2179          * When adding a module, we need to check if tracers are
2180          * currently enabled and if they are set to trace all functions.
2181          * If they are, we need to enable the module functions as well
2182          * as update the reference counts for those function records.
2183          */
2184         if (mod) {
2185                 struct ftrace_ops *ops;
2186
2187                 for (ops = ftrace_ops_list;
2188                      ops != &ftrace_list_end; ops = ops->next) {
2189                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
2190                             ops_traces_mod(ops))
2191                                 ref++;
2192                 }
2193         }
2194
2195         start = ftrace_now(raw_smp_processor_id());
2196         ftrace_update_cnt = 0;
2197
2198         for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2199
2200                 for (i = 0; i < pg->index; i++) {
2201                         /* If something went wrong, bail without enabling anything */
2202                         if (unlikely(ftrace_disabled))
2203                                 return -1;
2204
2205                         p = &pg->records[i];
2206                         p->flags = ref;
2207
2208                         /*
2209                          * Do the initial record conversion from mcount jump
2210                          * to the NOP instructions.
2211                          */
2212                         if (!ftrace_code_disable(mod, p))
2213                                 break;
2214
2215                         ftrace_update_cnt++;
2216
2217                         /*
2218                          * If the tracing is enabled, go ahead and enable the record.
2219                          *
2220                          * The reason not to enable the record immediately is the
2221                          * inherent check of ftrace_make_nop/ftrace_make_call for
2222                          * the correct previous instruction.  Doing the NOP conversion
2223                          * first puts the module into the correct state, thus
2224                          * passing the ftrace_make_call check.
2225                          */
2226                         if (ftrace_start_up && ref) {
2227                                 int failed = __ftrace_replace_code(p, 1);
2228                                 if (failed)
2229                                         ftrace_bug(failed, p->ip);
2230                         }
2231                 }
2232         }
2233
2234         ftrace_new_pgs = NULL;
2235
2236         stop = ftrace_now(raw_smp_processor_id());
2237         ftrace_update_time = stop - start;
2238         ftrace_update_tot_cnt += ftrace_update_cnt;
2239
2240         return 0;
2241 }
2242
2243 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2244 {
2245         int order;
2246         int cnt;
2247
2248         if (WARN_ON(!count))
2249                 return -EINVAL;
2250
2251         order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2252
2253         /*
2254          * We want to fill as much as possible. No more than a page
2255          * may be empty.
2256          */
2257         while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2258                 order--;
2259
2260  again:
2261         pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2262
2263         if (!pg->records) {
2264                 /* if we can't allocate this size, try something smaller */
2265                 if (!order)
2266                         return -ENOMEM;
2267                 order >>= 1;
2268                 goto again;
2269         }
2270
2271         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2272         pg->size = cnt;
2273
2274         if (cnt > count)
2275                 cnt = count;
2276
2277         return cnt;
2278 }
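/*
 * Worked example (illustrative, not part of the original file),
 * assuming PAGE_SIZE == 4096 and ENTRY_SIZE == 32, which gives
 * ENTRIES_PER_PAGE == 128; the real sizes depend on the arch and on
 * struct dyn_ftrace:
 *
 *	count = 200
 *	DIV_ROUND_UP(200, 128) = 2 pages  ->  order = 1
 *	(PAGE_SIZE << 1) / 32 = 256 entries, and 256 < 200 + 128,
 *	so the order is not shrunk: 256 entries are allocated and at
 *	most one page's worth (here 56 entries) is left unused.
 */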
2279
2280 static struct ftrace_page *
2281 ftrace_allocate_pages(unsigned long num_to_init)
2282 {
2283         struct ftrace_page *start_pg;
2284         struct ftrace_page *pg;
2285         int order;
2286         int cnt;
2287
2288         if (!num_to_init)
2289                 return NULL;
2290
2291         start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2292         if (!pg)
2293                 return NULL;
2294
2295         /*
2296          * Try to allocate as much as possible in one contiguous
2297          * location that fills in all of the space. We want to
2298          * waste as little space as possible.
2299          */
2300         for (;;) {
2301                 cnt = ftrace_allocate_records(pg, num_to_init);
2302                 if (cnt < 0)
2303                         goto free_pages;
2304
2305                 num_to_init -= cnt;
2306                 if (!num_to_init)
2307                         break;
2308
2309                 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2310                 if (!pg->next)
2311                         goto free_pages;
2312
2313                 pg = pg->next;
2314         }
2315
2316         return start_pg;
2317
2318  free_pages:
2319         while (start_pg) {
2320                 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2321                 free_pages((unsigned long)pg->records, order);
2322                 start_pg = pg->next;
2323                 kfree(pg);
2324                 pg = start_pg;
2325         }
2326         pr_info("ftrace: FAILED to allocate memory for functions\n");
2327         return NULL;
2328 }
2329
2330 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2331 {
2332         int cnt;
2333
2334         if (!num_to_init) {
2335                 pr_info("ftrace: No functions to be traced?\n");
2336                 return -1;
2337         }
2338
2339         cnt = num_to_init / ENTRIES_PER_PAGE;
2340         pr_info("ftrace: allocating %ld entries in %d pages\n",
2341                 num_to_init, cnt + 1);
2342
2343         return 0;
2344 }
2345
2346 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2347
2348 struct ftrace_iterator {
2349         loff_t                          pos;
2350         loff_t                          func_pos;
2351         struct ftrace_page              *pg;
2352         struct dyn_ftrace               *func;
2353         struct ftrace_func_probe        *probe;
2354         struct trace_parser             parser;
2355         struct ftrace_hash              *hash;
2356         struct ftrace_ops               *ops;
2357         int                             hidx;
2358         int                             idx;
2359         unsigned                        flags;
2360 };
2361
2362 static void *
2363 t_hash_next(struct seq_file *m, loff_t *pos)
2364 {
2365         struct ftrace_iterator *iter = m->private;
2366         struct hlist_node *hnd = NULL;
2367         struct hlist_head *hhd;
2368
2369         (*pos)++;
2370         iter->pos = *pos;
2371
2372         if (iter->probe)
2373                 hnd = &iter->probe->node;
2374  retry:
2375         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2376                 return NULL;
2377
2378         hhd = &ftrace_func_hash[iter->hidx];
2379
2380         if (hlist_empty(hhd)) {
2381                 iter->hidx++;
2382                 hnd = NULL;
2383                 goto retry;
2384         }
2385
2386         if (!hnd)
2387                 hnd = hhd->first;
2388         else {
2389                 hnd = hnd->next;
2390                 if (!hnd) {
2391                         iter->hidx++;
2392                         goto retry;
2393                 }
2394         }
2395
2396         if (WARN_ON_ONCE(!hnd))
2397                 return NULL;
2398
2399         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2400
2401         return iter;
2402 }
2403
2404 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2405 {
2406         struct ftrace_iterator *iter = m->private;
2407         void *p = NULL;
2408         loff_t l;
2409
2410         if (!(iter->flags & FTRACE_ITER_DO_HASH))
2411                 return NULL;
2412
2413         if (iter->func_pos > *pos)
2414                 return NULL;
2415
2416         iter->hidx = 0;
2417         for (l = 0; l <= (*pos - iter->func_pos); ) {
2418                 p = t_hash_next(m, &l);
2419                 if (!p)
2420                         break;
2421         }
2422         if (!p)
2423                 return NULL;
2424
2425         /* Only set this if we have an item */
2426         iter->flags |= FTRACE_ITER_HASH;
2427
2428         return iter;
2429 }
2430
2431 static int
2432 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2433 {
2434         struct ftrace_func_probe *rec;
2435
2436         rec = iter->probe;
2437         if (WARN_ON_ONCE(!rec))
2438                 return -EIO;
2439
2440         if (rec->ops->print)
2441                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2442
2443         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2444
2445         if (rec->data)
2446                 seq_printf(m, ":%p", rec->data);
2447         seq_putc(m, '\n');
2448
2449         return 0;
2450 }
2451
2452 static void *
2453 t_next(struct seq_file *m, void *v, loff_t *pos)
2454 {
2455         struct ftrace_iterator *iter = m->private;
2456         struct ftrace_ops *ops = iter->ops;
2457         struct dyn_ftrace *rec = NULL;
2458
2459         if (unlikely(ftrace_disabled))
2460                 return NULL;
2461
2462         if (iter->flags & FTRACE_ITER_HASH)
2463                 return t_hash_next(m, pos);
2464
2465         (*pos)++;
2466         iter->pos = iter->func_pos = *pos;
2467
2468         if (iter->flags & FTRACE_ITER_PRINTALL)
2469                 return t_hash_start(m, pos);
2470
2471  retry:
2472         if (iter->idx >= iter->pg->index) {
2473                 if (iter->pg->next) {
2474                         iter->pg = iter->pg->next;
2475                         iter->idx = 0;
2476                         goto retry;
2477                 }
2478         } else {
2479                 rec = &iter->pg->records[iter->idx++];
2480                 if (((iter->flags & FTRACE_ITER_FILTER) &&
2481                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2482
2483                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2484                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2485
2486                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2487                      !(rec->flags & FTRACE_FL_ENABLED))) {
2488
2489                         rec = NULL;
2490                         goto retry;
2491                 }
2492         }
2493
2494         if (!rec)
2495                 return t_hash_start(m, pos);
2496
2497         iter->func = rec;
2498
2499         return iter;
2500 }
2501
2502 static void reset_iter_read(struct ftrace_iterator *iter)
2503 {
2504         iter->pos = 0;
2505         iter->func_pos = 0;
2506         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2507 }
2508
2509 static void *t_start(struct seq_file *m, loff_t *pos)
2510 {
2511         struct ftrace_iterator *iter = m->private;
2512         struct ftrace_ops *ops = iter->ops;
2513         void *p = NULL;
2514         loff_t l;
2515
2516         mutex_lock(&ftrace_lock);
2517
2518         if (unlikely(ftrace_disabled))
2519                 return NULL;
2520
2521         /*
2522          * If an lseek was done, then reset and start from the beginning.
2523          */
2524         if (*pos < iter->pos)
2525                 reset_iter_read(iter);
2526
2527         /*
2528          * For set_ftrace_filter reading, if we have the filter
2529          * off, we can short cut and just print out that all
2530          * functions are enabled.
2531          */
2532         if (iter->flags & FTRACE_ITER_FILTER &&
2533             ftrace_hash_empty(ops->filter_hash)) {
2534                 if (*pos > 0)
2535                         return t_hash_start(m, pos);
2536                 iter->flags |= FTRACE_ITER_PRINTALL;
2537                 /* reset in case of seek/pread */
2538                 iter->flags &= ~FTRACE_ITER_HASH;
2539                 return iter;
2540         }
2541
2542         if (iter->flags & FTRACE_ITER_HASH)
2543                 return t_hash_start(m, pos);
2544
2545         /*
2546          * Unfortunately, we need to restart at ftrace_pages_start
2547          * every time we let go of the ftrace_lock. This is because
2548          * those pointers can change without the lock.
2549          */
2550         iter->pg = ftrace_pages_start;
2551         iter->idx = 0;
2552         for (l = 0; l <= *pos; ) {
2553                 p = t_next(m, p, &l);
2554                 if (!p)
2555                         break;
2556         }
2557
2558         if (!p)
2559                 return t_hash_start(m, pos);
2560
2561         return iter;
2562 }
2563
2564 static void t_stop(struct seq_file *m, void *p)
2565 {
2566         mutex_unlock(&ftrace_lock);
2567 }
2568
2569 static int t_show(struct seq_file *m, void *v)
2570 {
2571         struct ftrace_iterator *iter = m->private;
2572         struct dyn_ftrace *rec;
2573
2574         if (iter->flags & FTRACE_ITER_HASH)
2575                 return t_hash_show(m, iter);
2576
2577         if (iter->flags & FTRACE_ITER_PRINTALL) {
2578                 seq_printf(m, "#### all functions enabled ####\n");
2579                 return 0;
2580         }
2581
2582         rec = iter->func;
2583
2584         if (!rec)
2585                 return 0;
2586
2587         seq_printf(m, "%ps", (void *)rec->ip);
2588         if (iter->flags & FTRACE_ITER_ENABLED)
2589                 seq_printf(m, " (%ld)%s",
2590                            rec->flags & ~FTRACE_FL_MASK,
2591                            rec->flags & FTRACE_FL_REGS ? " R" : "");
2592         seq_printf(m, "\n");
2593
2594         return 0;
2595 }
2596
2597 static const struct seq_operations show_ftrace_seq_ops = {
2598         .start = t_start,
2599         .next = t_next,
2600         .stop = t_stop,
2601         .show = t_show,
2602 };
2603
2604 static int
2605 ftrace_avail_open(struct inode *inode, struct file *file)
2606 {
2607         struct ftrace_iterator *iter;
2608
2609         if (unlikely(ftrace_disabled))
2610                 return -ENODEV;
2611
2612         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2613         if (iter) {
2614                 iter->pg = ftrace_pages_start;
2615                 iter->ops = &global_ops;
2616         }
2617
2618         return iter ? 0 : -ENOMEM;
2619 }
2620
2621 static int
2622 ftrace_enabled_open(struct inode *inode, struct file *file)
2623 {
2624         struct ftrace_iterator *iter;
2625
2626         if (unlikely(ftrace_disabled))
2627                 return -ENODEV;
2628
2629         iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2630         if (iter) {
2631                 iter->pg = ftrace_pages_start;
2632                 iter->flags = FTRACE_ITER_ENABLED;
2633                 iter->ops = &global_ops;
2634         }
2635
2636         return iter ? 0 : -ENOMEM;
2637 }
2638
2639 static void ftrace_filter_reset(struct ftrace_hash *hash)
2640 {
2641         mutex_lock(&ftrace_lock);
2642         ftrace_hash_clear(hash);
2643         mutex_unlock(&ftrace_lock);
2644 }
2645
2646 /**
2647  * ftrace_regex_open - initialize function tracer filter files
2648  * @ops: The ftrace_ops that hold the hash filters
2649  * @flag: The type of filter to process
2650  * @inode: The inode, usually passed in to your open routine
2651  * @file: The file, usually passed in to your open routine
2652  *
2653  * ftrace_regex_open() initializes the filter files for the
2654  * @ops. Depending on @flag it may process the filter hash or
2655  * the notrace hash of @ops. With this called from the open
2656  * routine, you can use ftrace_filter_write() for the write
2657  * routine if @flag has FTRACE_ITER_FILTER set, or
2658  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2659  * ftrace_filter_lseek() should be used as the lseek routine, and
2660  * release must call ftrace_regex_release().
2661  */
2662 int
2663 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2664                   struct inode *inode, struct file *file)
2665 {
2666         struct ftrace_iterator *iter;
2667         struct ftrace_hash *hash;
2668         int ret = 0;
2669
2670         ftrace_ops_init(ops);
2671
2672         if (unlikely(ftrace_disabled))
2673                 return -ENODEV;
2674
2675         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2676         if (!iter)
2677                 return -ENOMEM;
2678
2679         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2680                 kfree(iter);
2681                 return -ENOMEM;
2682         }
2683
2684         iter->ops = ops;
2685         iter->flags = flag;
2686
2687         mutex_lock(&ops->regex_lock);
2688
2689         if (flag & FTRACE_ITER_NOTRACE)
2690                 hash = ops->notrace_hash;
2691         else
2692                 hash = ops->filter_hash;
2693
2694         if (file->f_mode & FMODE_WRITE) {
2695                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2696                 if (!iter->hash) {
2697                         trace_parser_put(&iter->parser);
2698                         kfree(iter);
2699                         ret = -ENOMEM;
2700                         goto out_unlock;
2701                 }
2702         }
2703
2704         if ((file->f_mode & FMODE_WRITE) &&
2705             (file->f_flags & O_TRUNC))
2706                 ftrace_filter_reset(iter->hash);
2707
2708         if (file->f_mode & FMODE_READ) {
2709                 iter->pg = ftrace_pages_start;
2710
2711                 ret = seq_open(file, &show_ftrace_seq_ops);
2712                 if (!ret) {
2713                         struct seq_file *m = file->private_data;
2714                         m->private = iter;
2715                 } else {
2716                         /* Failed */
2717                         free_ftrace_hash(iter->hash);
2718                         trace_parser_put(&iter->parser);
2719                         kfree(iter);
2720                 }
2721         } else
2722                 file->private_data = iter;
2723
2724  out_unlock:
2725         mutex_unlock(&ops->regex_lock);
2726
2727         return ret;
2728 }
2729
2730 static int
2731 ftrace_filter_open(struct inode *inode, struct file *file)
2732 {
2733         return ftrace_regex_open(&global_ops,
2734                         FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2735                         inode, file);
2736 }
2737
2738 static int
2739 ftrace_notrace_open(struct inode *inode, struct file *file)
2740 {
2741         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2742                                  inode, file);
2743 }
2744
2745 static int ftrace_match(char *str, char *regex, int len, int type)
2746 {
2747         int matched = 0;
2748         int slen;
2749
2750         switch (type) {
2751         case MATCH_FULL:
2752                 if (strcmp(str, regex) == 0)
2753                         matched = 1;
2754                 break;
2755         case MATCH_FRONT_ONLY:
2756                 if (strncmp(str, regex, len) == 0)
2757                         matched = 1;
2758                 break;
2759         case MATCH_MIDDLE_ONLY:
2760                 if (strstr(str, regex))
2761                         matched = 1;
2762                 break;
2763         case MATCH_END_ONLY:
2764                 slen = strlen(str);
2765                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2766                         matched = 1;
2767                 break;
2768         }
2769
2770         return matched;
2771 }
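/*
 * Illustrative examples (not part of the original file) of how the
 * match types behave once filter_parse_regex() has stripped the
 * wildcards from the user's glob:
 *
 *	"kmalloc"    ->  MATCH_FULL:         only "kmalloc"
 *	"kmalloc*"   ->  MATCH_FRONT_ONLY:   "kmalloc", "kmalloc_order", ...
 *	"*alloc"     ->  MATCH_END_ONLY:     "kmalloc", "vmalloc", ...
 *	"*alloc*"    ->  MATCH_MIDDLE_ONLY:  anything containing "alloc"
 */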
2772
2773 static int
2774 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2775 {
2776         struct ftrace_func_entry *entry;
2777         int ret = 0;
2778
2779         entry = ftrace_lookup_ip(hash, rec->ip);
2780         if (not) {
2781                 /* Do nothing if it doesn't exist */
2782                 if (!entry)
2783                         return 0;
2784
2785                 free_hash_entry(hash, entry);
2786         } else {
2787                 /* Do nothing if it exists */
2788                 if (entry)
2789                         return 0;
2790
2791                 ret = add_hash_entry(hash, rec->ip);
2792         }
2793         return ret;
2794 }
2795
2796 static int
2797 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2798                     char *regex, int len, int type)
2799 {
2800         char str[KSYM_SYMBOL_LEN];
2801         char *modname;
2802
2803         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2804
2805         if (mod) {
2806                 /* module lookup requires matching the module */
2807                 if (!modname || strcmp(modname, mod))
2808                         return 0;
2809
2810                 /* blank search means to match all funcs in the mod */
2811                 if (!len)
2812                         return 1;
2813         }
2814
2815         return ftrace_match(str, regex, len, type);
2816 }
2817
2818 static int
2819 match_records(struct ftrace_hash *hash, char *buff,
2820               int len, char *mod, int not)
2821 {
2822         unsigned search_len = 0;
2823         struct ftrace_page *pg;
2824         struct dyn_ftrace *rec;
2825         int type = MATCH_FULL;
2826         char *search = buff;
2827         int found = 0;
2828         int ret;
2829
2830         if (len) {
2831                 type = filter_parse_regex(buff, len, &search, &not);
2832                 search_len = strlen(search);
2833         }
2834
2835         mutex_lock(&ftrace_lock);
2836
2837         if (unlikely(ftrace_disabled))
2838                 goto out_unlock;
2839
2840         do_for_each_ftrace_rec(pg, rec) {
2841                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2842                         ret = enter_record(hash, rec, not);
2843                         if (ret < 0) {
2844                                 found = ret;
2845                                 goto out_unlock;
2846                         }
2847                         found = 1;
2848                 }
2849         } while_for_each_ftrace_rec();
2850  out_unlock:
2851         mutex_unlock(&ftrace_lock);
2852
2853         return found;
2854 }
2855
2856 static int
2857 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2858 {
2859         return match_records(hash, buff, len, NULL, 0);
2860 }
2861
2862 static int
2863 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2864 {
2865         int not = 0;
2866
2867         /* blank or '*' mean the same */
2868         if (strcmp(buff, "*") == 0)
2869                 buff[0] = 0;
2870
2871         /* handle the case of 'dont filter this module' */
2872         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2873                 buff[0] = 0;
2874                 not = 1;
2875         }
2876
2877         return match_records(hash, buff, strlen(buff), mod, not);
2878 }
2879
2880 /*
2881  * We register the module command as a template to show others how
2882  * to register a command as well.
2883  */
2884
2885 static int
2886 ftrace_mod_callback(struct ftrace_hash *hash,
2887                     char *func, char *cmd, char *param, int enable)
2888 {
2889         char *mod;
2890         int ret = -EINVAL;
2891
2892         /*
2893          * cmd == 'mod' because we only registered this func
2894          * for the 'mod' ftrace_func_command.
2895          * But if you register one func with multiple commands,
2896          * you can tell which command was used by the cmd
2897          * parameter.
2898          */
2899
2900         /* we must have a module name */
2901         if (!param)
2902                 return ret;
2903
2904         mod = strsep(&param, ":");
2905         if (!strlen(mod))
2906                 return ret;
2907
2908         ret = ftrace_match_module_records(hash, func, mod);
2909         if (!ret)
2910                 ret = -EINVAL;
2911         if (ret < 0)
2912                 return ret;
2913
2914         return 0;
2915 }
2916
2917 static struct ftrace_func_command ftrace_mod_cmd = {
2918         .name                   = "mod",
2919         .func                   = ftrace_mod_callback,
2920 };
2921
2922 static int __init ftrace_mod_cmd_init(void)
2923 {
2924         return register_ftrace_command(&ftrace_mod_cmd);
2925 }
2926 core_initcall(ftrace_mod_cmd_init);
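/*
 * Illustrative sketch (not part of the original file): registering a
 * command of your own, following the "mod" template above.  The name
 * "foo" and foo_callback() are made up; a user would trigger it by
 * writing e.g. "kmalloc:foo:param" into set_ftrace_filter.
 *
 *	static int foo_callback(struct ftrace_hash *hash, char *func,
 *				char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command foo_cmd = {
 *		.name	= "foo",
 *		.func	= foo_callback,
 *	};
 *
 *	ret = register_ftrace_command(&foo_cmd);
 */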
2927
2928 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2929                                       struct ftrace_ops *op, struct pt_regs *pt_regs)
2930 {
2931         struct ftrace_func_probe *entry;
2932         struct hlist_head *hhd;
2933         unsigned long key;
2934
2935         key = hash_long(ip, FTRACE_HASH_BITS);
2936
2937         hhd = &ftrace_func_hash[key];
2938
2939         if (hlist_empty(hhd))
2940                 return;
2941
2942         /*
2943          * Disable preemption for these calls to prevent an RCU grace
2944          * period. This syncs the hash iteration and freeing of items
2945          * on the hash. rcu_read_lock is too dangerous here.
2946          */
2947         preempt_disable_notrace();
2948         hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
2949                 if (entry->ip == ip)
2950                         entry->ops->func(ip, parent_ip, &entry->data);
2951         }
2952         preempt_enable_notrace();
2953 }
2954
2955 static struct ftrace_ops trace_probe_ops __read_mostly =
2956 {
2957         .func           = function_trace_probe_call,
2958         .flags          = FTRACE_OPS_FL_INITIALIZED,
2959         INIT_REGEX_LOCK(trace_probe_ops)
2960 };
2961
2962 static int ftrace_probe_registered;
2963
2964 static void __enable_ftrace_function_probe(void)
2965 {
2966         int ret;
2967         int i;
2968
2969         if (ftrace_probe_registered) {
2970                 /* still need to update the function call sites */
2971                 if (ftrace_enabled)
2972                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2973                 return;
2974         }
2975
2976         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2977                 struct hlist_head *hhd = &ftrace_func_hash[i];
2978                 if (hhd->first)
2979                         break;
2980         }
2981         /* Nothing registered? */
2982         if (i == FTRACE_FUNC_HASHSIZE)
2983                 return;
2984
2985         ret = __register_ftrace_function(&trace_probe_ops);
2986         if (!ret)
2987                 ret = ftrace_startup(&trace_probe_ops, 0);
2988
2989         ftrace_probe_registered = 1;
2990 }
2991
2992 static void __disable_ftrace_function_probe(void)
2993 {
2994         int ret;
2995         int i;
2996
2997         if (!ftrace_probe_registered)
2998                 return;
2999
3000         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3001                 struct hlist_head *hhd = &ftrace_func_hash[i];
3002                 if (hhd->first)
3003                         return;
3004         }
3005
3006         /* no more funcs left */
3007         ret = __unregister_ftrace_function(&trace_probe_ops);
3008         if (!ret)
3009                 ftrace_shutdown(&trace_probe_ops, 0);
3010
3011         ftrace_probe_registered = 0;
3012 }
3013
3014
3015 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3016 {
3017         if (entry->ops->free)
3018                 entry->ops->free(entry->ops, entry->ip, &entry->data);
3019         kfree(entry);
3020 }
3021
3022 int
3023 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3024                               void *data)
3025 {
3026         struct ftrace_func_probe *entry;
3027         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3028         struct ftrace_hash *hash;
3029         struct ftrace_page *pg;
3030         struct dyn_ftrace *rec;
3031         int type, len, not;
3032         unsigned long key;
3033         int count = 0;
3034         char *search;
3035         int ret;
3036
3037         type = filter_parse_regex(glob, strlen(glob), &search, &not);
3038         len = strlen(search);
3039
3040         /* we do not support '!' for function probes */
3041         if (WARN_ON(not))
3042                 return -EINVAL;
3043
3044         mutex_lock(&trace_probe_ops.regex_lock);
3045
3046         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3047         if (!hash) {
3048                 count = -ENOMEM;
3049                 goto out;
3050         }
3051
3052         if (unlikely(ftrace_disabled)) {
3053                 count = -ENODEV;
3054                 goto out;
3055         }
3056
3057         mutex_lock(&ftrace_lock);
3058
3059         do_for_each_ftrace_rec(pg, rec) {
3060
3061                 if (!ftrace_match_record(rec, NULL, search, len, type))
3062                         continue;
3063
3064                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3065                 if (!entry) {
3066                         /* If we did not process any, then return error */
3067                         if (!count)
3068                                 count = -ENOMEM;
3069                         goto out_unlock;
3070                 }
3071
3072                 count++;
3073
3074                 entry->data = data;
3075
3076                 /*
3077                  * The caller might want to do something special
3078                  * for each function we find. We call the callback
3079                  * to give the caller an opportunity to do so.
3080                  */
3081                 if (ops->init) {
3082                         if (ops->init(ops, rec->ip, &entry->data) < 0) {
3083                                 /* caller does not like this func */
3084                                 kfree(entry);
3085                                 continue;
3086                         }
3087                 }
3088
3089                 ret = enter_record(hash, rec, 0);
3090                 if (ret < 0) {
3091                         kfree(entry);
3092                         count = ret;
3093                         goto out_unlock;
3094                 }
3095
3096                 entry->ops = ops;
3097                 entry->ip = rec->ip;
3098
3099                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3100                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3101
3102         } while_for_each_ftrace_rec();
3103
3104         ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3105         if (ret < 0)
3106                 count = ret;
3107
3108         __enable_ftrace_function_probe();
3109
3110  out_unlock:
3111         mutex_unlock(&ftrace_lock);
3112  out:
3113         mutex_unlock(&trace_probe_ops.regex_lock);
3114         free_ftrace_hash(hash);
3115
3116         return count;
3117 }
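/*
 * Illustrative sketch (not part of the original file): a caller-side
 * view of the probe API above.  my_probe_func() and my_probe_ops are
 * hypothetical; the glob follows the same rules as set_ftrace_filter,
 * and the return value is the number of functions matched:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	count = register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 */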
3118
3119 enum {
3120         PROBE_TEST_FUNC         = 1,
3121         PROBE_TEST_DATA         = 2
3122 };
3123
3124 static void
3125 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3126                                   void *data, int flags)
3127 {
3128         struct ftrace_func_entry *rec_entry;
3129         struct ftrace_func_probe *entry;
3130         struct ftrace_func_probe *p;
3131         struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3132         struct list_head free_list;
3133         struct ftrace_hash *hash;
3134         struct hlist_node *tmp;
3135         char str[KSYM_SYMBOL_LEN];
3136         int type = MATCH_FULL;
3137         int i, len = 0;
3138         char *search;
3139
3140         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3141                 glob = NULL;
3142         else if (glob) {
3143                 int not;
3144
3145                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3146                 len = strlen(search);
3147
3148                 /* we do not support '!' for function probes */
3149                 if (WARN_ON(not))
3150                         return;
3151         }
3152
3153         mutex_lock(&trace_probe_ops.regex_lock);
3154
3155         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3156         if (!hash)
3157                 /* Hmm, should report this somehow */
3158                 goto out_unlock;
3159
3160         INIT_LIST_HEAD(&free_list);
3161
3162         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3163                 struct hlist_head *hhd = &ftrace_func_hash[i];
3164
3165                 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3166
3167                         /* break up if statements for readability */
3168                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3169                                 continue;
3170
3171                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
3172                                 continue;
3173
3174                         /* do this last, since it is the most expensive */
3175                         if (glob) {
3176                                 kallsyms_lookup(entry->ip, NULL, NULL,
3177                                                 NULL, str);
3178                                 if (!ftrace_match(str, glob, len, type))
3179                                         continue;
3180                         }
3181
3182                         rec_entry = ftrace_lookup_ip(hash, entry->ip);
3183                         /* It is possible more than one entry had this ip */
3184                         if (rec_entry)
3185                                 free_hash_entry(hash, rec_entry);
3186
3187                         hlist_del_rcu(&entry->node);
3188                         list_add(&entry->free_list, &free_list);
3189                 }
3190         }
3191         mutex_lock(&ftrace_lock);
3192         __disable_ftrace_function_probe();
3193         /*
3194          * Remove after the disable is called. Otherwise, if the last
3195          * probe is removed, a null hash means *all enabled*.
3196          */
3197         ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3198         synchronize_sched();
3199         list_for_each_entry_safe(entry, p, &free_list, free_list) {
3200                 list_del(&entry->free_list);
3201                 ftrace_free_entry(entry);
3202         }
3203         mutex_unlock(&ftrace_lock);
3204
3205  out_unlock:
3206         mutex_unlock(&trace_probe_ops.regex_lock);
3207         free_ftrace_hash(hash);
3208 }
3209
3210 void
3211 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3212                                 void *data)
3213 {
3214         __unregister_ftrace_function_probe(glob, ops, data,
3215                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
3216 }
3217
3218 void
3219 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3220 {
3221         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3222 }
3223
3224 void unregister_ftrace_function_probe_all(char *glob)
3225 {
3226         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3227 }
3228
3229 static LIST_HEAD(ftrace_commands);
3230 static DEFINE_MUTEX(ftrace_cmd_mutex);
3231
3232 int register_ftrace_command(struct ftrace_func_command *cmd)
3233 {
3234         struct ftrace_func_command *p;
3235         int ret = 0;
3236
3237         mutex_lock(&ftrace_cmd_mutex);
3238         list_for_each_entry(p, &ftrace_commands, list) {
3239                 if (strcmp(cmd->name, p->name) == 0) {
3240                         ret = -EBUSY;
3241                         goto out_unlock;
3242                 }
3243         }
3244         list_add(&cmd->list, &ftrace_commands);
3245  out_unlock:
3246         mutex_unlock(&ftrace_cmd_mutex);
3247
3248         return ret;
3249 }
3250
3251 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3252 {
3253         struct ftrace_func_command *p, *n;
3254         int ret = -ENODEV;
3255
3256         mutex_lock(&ftrace_cmd_mutex);
3257         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3258                 if (strcmp(cmd->name, p->name) == 0) {
3259                         ret = 0;
3260                         list_del_init(&p->list);
3261                         goto out_unlock;
3262                 }
3263         }
3264  out_unlock:
3265         mutex_unlock(&ftrace_cmd_mutex);
3266
3267         return ret;
3268 }
3269
3270 static int ftrace_process_regex(struct ftrace_hash *hash,
3271                                 char *buff, int len, int enable)
3272 {
3273         char *func, *command, *next = buff;
3274         struct ftrace_func_command *p;
3275         int ret = -EINVAL;
3276
3277         func = strsep(&next, ":");
3278
3279         if (!next) {
3280                 ret = ftrace_match_records(hash, func, len);
3281                 if (!ret)
3282                         ret = -EINVAL;
3283                 if (ret < 0)
3284                         return ret;
3285                 return 0;
3286         }
3287
3288         /* command found */
3289
3290         command = strsep(&next, ":");
3291
3292         mutex_lock(&ftrace_cmd_mutex);
3293         list_for_each_entry(p, &ftrace_commands, list) {
3294                 if (strcmp(p->name, command) == 0) {
3295                         ret = p->func(hash, func, command, next, enable);
3296                         goto out_unlock;
3297                 }
3298         }
3299  out_unlock:
3300         mutex_unlock(&ftrace_cmd_mutex);
3301
3302         return ret;
3303 }
3304
3305 static ssize_t
3306 ftrace_regex_write(struct file *file, const char __user *ubuf,
3307                    size_t cnt, loff_t *ppos, int enable)
3308 {
3309         struct ftrace_iterator *iter;
3310         struct trace_parser *parser;
3311         ssize_t ret, read;
3312
3313         if (!cnt)
3314                 return 0;
3315
3316         if (file->f_mode & FMODE_READ) {
3317                 struct seq_file *m = file->private_data;
3318                 iter = m->private;
3319         } else
3320                 iter = file->private_data;
3321
3322         if (unlikely(ftrace_disabled))
3323                 return -ENODEV;
3324
3325         /* iter->hash is a local copy, so we don't need regex_lock */
3326
3327         parser = &iter->parser;
3328         read = trace_get_user(parser, ubuf, cnt, ppos);
3329
3330         if (read >= 0 && trace_parser_loaded(parser) &&
3331             !trace_parser_cont(parser)) {
3332                 ret = ftrace_process_regex(iter->hash, parser->buffer,
3333                                            parser->idx, enable);
3334                 trace_parser_clear(parser);
3335                 if (ret < 0)
3336                         goto out;
3337         }
3338
3339         ret = read;
3340  out:
3341         return ret;
3342 }
3343
3344 ssize_t
3345 ftrace_filter_write(struct file *file, const char __user *ubuf,
3346                     size_t cnt, loff_t *ppos)
3347 {
3348         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3349 }
3350
3351 ssize_t
3352 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3353                      size_t cnt, loff_t *ppos)
3354 {
3355         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3356 }
3357
3358 static int
3359 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3360 {
3361         struct ftrace_func_entry *entry;
3362
3363         if (!ftrace_location(ip))
3364                 return -EINVAL;
3365
3366         if (remove) {
3367                 entry = ftrace_lookup_ip(hash, ip);
3368                 if (!entry)
3369                         return -ENOENT;
3370                 free_hash_entry(hash, entry);
3371                 return 0;
3372         }
3373
3374         return add_hash_entry(hash, ip);
3375 }
3376
3377 static int
3378 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3379                 unsigned long ip, int remove, int reset, int enable)
3380 {
3381         struct ftrace_hash **orig_hash;
3382         struct ftrace_hash *hash;
3383         int ret;
3384
3385         /* All global ops use the global ops filters */
3386         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3387                 ops = &global_ops;
3388
3389         if (unlikely(ftrace_disabled))
3390                 return -ENODEV;
3391
3392         mutex_lock(&ops->regex_lock);
3393
3394         if (enable)
3395                 orig_hash = &ops->filter_hash;
3396         else
3397                 orig_hash = &ops->notrace_hash;
3398
3399         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3400         if (!hash) {
3401                 ret = -ENOMEM;
3402                 goto out_regex_unlock;
3403         }
3404
3405         if (reset)
3406                 ftrace_filter_reset(hash);
3407         if (buf && !ftrace_match_records(hash, buf, len)) {
3408                 ret = -EINVAL;
3409                 goto out_regex_unlock;
3410         }
3411         if (ip) {
3412                 ret = ftrace_match_addr(hash, ip, remove);
3413                 if (ret < 0)
3414                         goto out_regex_unlock;
3415         }
3416
3417         mutex_lock(&ftrace_lock);
3418         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3419         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3420             && ftrace_enabled)
3421                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3422
3423         mutex_unlock(&ftrace_lock);
3424
3425  out_regex_unlock:
3426         mutex_unlock(&ops->regex_lock);
3427
3428         free_ftrace_hash(hash);
3429         return ret;
3430 }
3431
3432 static int
3433 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3434                 int reset, int enable)
3435 {
3436         return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3437 }
3438
3439 /**
3440  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3441  * @ops - the ops to set the filter with
3442  * @ip - the address to add to or remove from the filter.
3443  * @remove - non zero to remove the ip from the filter
3444  * @reset - non zero to reset all filters before applying this filter.
3445  *
3446  * Filters denote which functions should be enabled when tracing is enabled.
3447  * If @ip is NULL, it fails to update the filter.
3448  */
3449 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3450                          int remove, int reset)
3451 {
3452         ftrace_ops_init(ops);
3453         return ftrace_set_addr(ops, ip, remove, reset, 1);
3454 }
3455 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
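
/*
 * Usage sketch (illustrative, not from the original file): a kprobe-style
 * user that knows the exact callsite address can filter on it directly.
 * "my_ops" is a hypothetical, already-initialized ftrace_ops.
 *
 *	unsigned long ip = kallsyms_lookup_name("do_fork");
 *
 *	if (ip && !ftrace_set_filter_ip(&my_ops, ip, 0, 1))
 *		register_ftrace_function(&my_ops);
 *
 * Passing remove == 1 instead would drop the address from the filter again.
 */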
3456
3457 static int
3458 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3459                  int reset, int enable)
3460 {
3461         return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3462 }
3463
3464 /**
3465  * ftrace_set_filter - set a function to filter on in ftrace
3466  * @ops - the ops to set the filter with
3467  * @buf - the string that holds the function filter text.
3468  * @len - the length of the string.
3469  * @reset - non zero to reset all filters before applying this filter.
3470  *
3471  * Filters denote which functions should be enabled when tracing is enabled.
3472  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3473  */
3474 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3475                        int len, int reset)
3476 {
3477         ftrace_ops_init(ops);
3478         return ftrace_set_regex(ops, buf, len, reset, 1);
3479 }
3480 EXPORT_SYMBOL_GPL(ftrace_set_filter);
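
/*
 * Usage sketch (illustrative; my_ops is hypothetical): restrict an ops to
 * the VFS read/write paths before registering it. Glob characters are
 * handled by ftrace_match_records() via ftrace_set_hash().
 *
 *	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	ftrace_set_filter(&my_ops, "vfs_writ*", strlen("vfs_writ*"), 0);
 *
 * The first call resets any previous filter (reset == 1); the second
 * appends to it (reset == 0).
 */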
3481
3482 /**
3483  * ftrace_set_notrace - set a function to not trace in ftrace
3484  * @ops - the ops to set the notrace filter with
3485  * @buf - the string that holds the function notrace text.
3486  * @len - the length of the string.
3487  * @reset - non zero to reset all filters before applying this filter.
3488  *
3489  * Notrace Filters denote which functions should not be enabled when tracing
3490  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3491  * for tracing.
3492  */
3493 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3494                         int len, int reset)
3495 {
3496         ftrace_ops_init(ops);
3497         return ftrace_set_regex(ops, buf, len, reset, 0);
3498 }
3499 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3500 /**
3501  * ftrace_set_global_filter - set a function to filter on with global tracers
3503  * @buf - the string that holds the function filter text.
3504  * @len - the length of the string.
3505  * @reset - non zero to reset all filters before applying this filter.
3506  *
3507  * Filters denote which functions should be enabled when tracing is enabled.
3508  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3509  */
3510 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3511 {
3512         ftrace_set_regex(&global_ops, buf, len, reset, 1);
3513 }
3514 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3515
3516 /**
3517  * ftrace_set_global_notrace - set a function to not trace with global tracers
3519  * @buf - the string that holds the function notrace text.
3520  * @len - the length of the string.
3521  * @reset - non zero to reset all filters before applying this filter.
3522  *
3523  * Notrace Filters denote which functions should not be enabled when tracing
3524  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3525  * for tracing.
3526  */
3527 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3528 {
3529         ftrace_set_regex(&global_ops, buf, len, reset, 0);
3530 }
3531 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3532
3533 /*
3534  * command line interface to allow users to set filters on boot up.
3535  */
3536 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3537 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3538 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3539
3540 /* Used by the function selftest to skip testing when a boot-time filter is set */
3541 bool ftrace_filter_param __initdata;
3542
3543 static int __init set_ftrace_notrace(char *str)
3544 {
3545         ftrace_filter_param = true;
3546         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3547         return 1;
3548 }
3549 __setup("ftrace_notrace=", set_ftrace_notrace);
3550
3551 static int __init set_ftrace_filter(char *str)
3552 {
3553         ftrace_filter_param = true;
3554         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3555         return 1;
3556 }
3557 __setup("ftrace_filter=", set_ftrace_filter);
3558
3559 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3560 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3561 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3562
3563 static int __init set_graph_function(char *str)
3564 {
3565         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3566         return 1;
3567 }
3568 __setup("ftrace_graph_filter=", set_graph_function);
3569
3570 static void __init set_ftrace_early_graph(char *buf)
3571 {
3572         int ret;
3573         char *func;
3574
3575         while (buf) {
3576                 func = strsep(&buf, ",");
3577                 /* we allow only one expression at a time */
3578                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3579                                       func);
3580                 if (ret)
3581                         printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
3583         }
3584 }
3585 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3586
3587 void __init
3588 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3589 {
3590         char *func;
3591
3592         ftrace_ops_init(ops);
3593
3594         while (buf) {
3595                 func = strsep(&buf, ",");
3596                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3597         }
3598 }
3599
3600 static void __init set_ftrace_early_filters(void)
3601 {
3602         if (ftrace_filter_buf[0])
3603                 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3604         if (ftrace_notrace_buf[0])
3605                 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3606 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3607         if (ftrace_graph_buf[0])
3608                 set_ftrace_early_graph(ftrace_graph_buf);
3609 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3610 }
3611
3612 int ftrace_regex_release(struct inode *inode, struct file *file)
3613 {
3614         struct seq_file *m = (struct seq_file *)file->private_data;
3615         struct ftrace_iterator *iter;
3616         struct ftrace_hash **orig_hash;
3617         struct trace_parser *parser;
3618         int filter_hash;
3619         int ret;
3620
3621         if (file->f_mode & FMODE_READ) {
3622                 iter = m->private;
3623                 seq_release(inode, file);
3624         } else
3625                 iter = file->private_data;
3626
3627         parser = &iter->parser;
3628         if (trace_parser_loaded(parser)) {
3629                 parser->buffer[parser->idx] = 0;
3630                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3631         }
3632
3633         trace_parser_put(parser);
3634
3635         mutex_lock(&iter->ops->regex_lock);
3636
3637         if (file->f_mode & FMODE_WRITE) {
3638                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3639
3640                 if (filter_hash)
3641                         orig_hash = &iter->ops->filter_hash;
3642                 else
3643                         orig_hash = &iter->ops->notrace_hash;
3644
3645                 mutex_lock(&ftrace_lock);
3646                 ret = ftrace_hash_move(iter->ops, filter_hash,
3647                                        orig_hash, iter->hash);
3648                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3649                     && ftrace_enabled)
3650                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3651
3652                 mutex_unlock(&ftrace_lock);
3653         }
3654
3655         mutex_unlock(&iter->ops->regex_lock);
3656         free_ftrace_hash(iter->hash);
3657         kfree(iter);
3658
3659         return 0;
3660 }
3661
3662 static const struct file_operations ftrace_avail_fops = {
3663         .open = ftrace_avail_open,
3664         .read = seq_read,
3665         .llseek = seq_lseek,
3666         .release = seq_release_private,
3667 };
3668
3669 static const struct file_operations ftrace_enabled_fops = {
3670         .open = ftrace_enabled_open,
3671         .read = seq_read,
3672         .llseek = seq_lseek,
3673         .release = seq_release_private,
3674 };
3675
3676 static const struct file_operations ftrace_filter_fops = {
3677         .open = ftrace_filter_open,
3678         .read = seq_read,
3679         .write = ftrace_filter_write,
3680         .llseek = ftrace_filter_lseek,
3681         .release = ftrace_regex_release,
3682 };
3683
3684 static const struct file_operations ftrace_notrace_fops = {
3685         .open = ftrace_notrace_open,
3686         .read = seq_read,
3687         .write = ftrace_notrace_write,
3688         .llseek = ftrace_filter_lseek,
3689         .release = ftrace_regex_release,
3690 };
3691
3692 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3693
3694 static DEFINE_MUTEX(graph_lock);
3695
3696 int ftrace_graph_count;
3697 int ftrace_graph_filter_enabled;
3698 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3699
3700 static void *
3701 __g_next(struct seq_file *m, loff_t *pos)
3702 {
3703         if (*pos >= ftrace_graph_count)
3704                 return NULL;
3705         return &ftrace_graph_funcs[*pos];
3706 }
3707
3708 static void *
3709 g_next(struct seq_file *m, void *v, loff_t *pos)
3710 {
3711         (*pos)++;
3712         return __g_next(m, pos);
3713 }
3714
3715 static void *g_start(struct seq_file *m, loff_t *pos)
3716 {
3717         mutex_lock(&graph_lock);
3718
3719         /* No filter set; tell g_show to print "all functions enabled" */
3720         if (!ftrace_graph_filter_enabled && !*pos)
3721                 return (void *)1;
3722
3723         return __g_next(m, pos);
3724 }
3725
3726 static void g_stop(struct seq_file *m, void *p)
3727 {
3728         mutex_unlock(&graph_lock);
3729 }
3730
3731 static int g_show(struct seq_file *m, void *v)
3732 {
3733         unsigned long *ptr = v;
3734
3735         if (!ptr)
3736                 return 0;
3737
3738         if (ptr == (unsigned long *)1) {
3739                 seq_printf(m, "#### all functions enabled ####\n");
3740                 return 0;
3741         }
3742
3743         seq_printf(m, "%ps\n", (void *)*ptr);
3744
3745         return 0;
3746 }
3747
3748 static const struct seq_operations ftrace_graph_seq_ops = {
3749         .start = g_start,
3750         .next = g_next,
3751         .stop = g_stop,
3752         .show = g_show,
3753 };
3754
3755 static int
3756 ftrace_graph_open(struct inode *inode, struct file *file)
3757 {
3758         int ret = 0;
3759
3760         if (unlikely(ftrace_disabled))
3761                 return -ENODEV;
3762
3763         mutex_lock(&graph_lock);
3764         if ((file->f_mode & FMODE_WRITE) &&
3765             (file->f_flags & O_TRUNC)) {
3766                 ftrace_graph_filter_enabled = 0;
3767                 ftrace_graph_count = 0;
3768                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3769         }
3770         mutex_unlock(&graph_lock);
3771
3772         if (file->f_mode & FMODE_READ)
3773                 ret = seq_open(file, &ftrace_graph_seq_ops);
3774
3775         return ret;
3776 }
3777
3778 static int
3779 ftrace_graph_release(struct inode *inode, struct file *file)
3780 {
3781         if (file->f_mode & FMODE_READ)
3782                 seq_release(inode, file);
3783         return 0;
3784 }
3785
3786 static int
3787 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3788 {
3789         struct dyn_ftrace *rec;
3790         struct ftrace_page *pg;
3791         int search_len;
3792         int fail = 1;
3793         int type, not;
3794         char *search;
3795         bool exists;
3796         int i;
3797
3798         /* decode regex */
3799         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3800         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3801                 return -EBUSY;
3802
3803         search_len = strlen(search);
3804
3805         mutex_lock(&ftrace_lock);
3806
3807         if (unlikely(ftrace_disabled)) {
3808                 mutex_unlock(&ftrace_lock);
3809                 return -ENODEV;
3810         }
3811
3812         do_for_each_ftrace_rec(pg, rec) {
3813
3814                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3815                         /* if it is in the array */
3816                         exists = false;
3817                         for (i = 0; i < *idx; i++) {
3818                                 if (array[i] == rec->ip) {
3819                                         exists = true;
3820                                         break;
3821                                 }
3822                         }
3823
3824                         if (!not) {
3825                                 fail = 0;
3826                                 if (!exists) {
3827                                         array[(*idx)++] = rec->ip;
3828                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3829                                                 goto out;
3830                                 }
3831                         } else {
3832                                 if (exists) {
3833                                         array[i] = array[--(*idx)];
3834                                         array[*idx] = 0;
3835                                         fail = 0;
3836                                 }
3837                         }
3838                 }
3839         } while_for_each_ftrace_rec();
3840 out:
3841         mutex_unlock(&ftrace_lock);
3842
3843         if (fail)
3844                 return -EINVAL;
3845
3846         ftrace_graph_filter_enabled = !!(*idx);
3847
3848         return 0;
3849 }
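
/*
 * Sketch of the semantics implemented above (shell side, illustrative):
 *
 *	echo 'sched_*'   > set_graph_function     add matching functions
 *	echo '!schedule' > set_graph_function     remove one entry again
 *
 * A leading '!' makes filter_parse_regex() return "not", which takes the
 * removal branch that swaps the last array slot over the deleted entry.
 */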
3850
3851 static ssize_t
3852 ftrace_graph_write(struct file *file, const char __user *ubuf,
3853                    size_t cnt, loff_t *ppos)
3854 {
3855         struct trace_parser parser;
3856         ssize_t read, ret;
3857
3858         if (!cnt)
3859                 return 0;
3860
3861         mutex_lock(&graph_lock);
3862
3863         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3864                 ret = -ENOMEM;
3865                 goto out_unlock;
3866         }
3867
3868         read = trace_get_user(&parser, ubuf, cnt, ppos);
3869
3870         if (read >= 0 && trace_parser_loaded(&parser)) {
3871                 parser.buffer[parser.idx] = 0;
3872
3873                 /* we allow only one expression at a time */
3874                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3875                                         parser.buffer);
3876                 if (ret)
3877                         goto out_free;
3878         }
3879
3880         ret = read;
3881
3882 out_free:
3883         trace_parser_put(&parser);
3884 out_unlock:
3885         mutex_unlock(&graph_lock);
3886
3887         return ret;
3888 }
3889
3890 static const struct file_operations ftrace_graph_fops = {
3891         .open           = ftrace_graph_open,
3892         .read           = seq_read,
3893         .write          = ftrace_graph_write,
3894         .llseek         = ftrace_filter_lseek,
3895         .release        = ftrace_graph_release,
3896 };
3897 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3898
3899 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3900 {
3901
3902         trace_create_file("available_filter_functions", 0444,
3903                         d_tracer, NULL, &ftrace_avail_fops);
3904
3905         trace_create_file("enabled_functions", 0444,
3906                         d_tracer, NULL, &ftrace_enabled_fops);
3907
3908         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3909                         NULL, &ftrace_filter_fops);
3910
3911         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3912                                     NULL, &ftrace_notrace_fops);
3913
3914 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3915         trace_create_file("set_graph_function", 0444, d_tracer,
3916                                     NULL,
3917                                     &ftrace_graph_fops);
3918 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3919
3920         return 0;
3921 }
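
/*
 * The files created above live under the tracing debugfs directory,
 * typically /sys/kernel/debug/tracing. A hedged usage sketch:
 *
 *	echo function    > current_tracer
 *	echo 'vfs_*'     > set_ftrace_filter
 *	echo '*lock*'    > set_ftrace_notrace
 *	head available_filter_functions
 *
 * current_tracer itself is created elsewhere (trace.c); only the four
 * files above (five with CONFIG_FUNCTION_GRAPH_TRACER) come from this
 * function.
 */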
3922
3923 static int ftrace_cmp_ips(const void *a, const void *b)
3924 {
3925         const unsigned long *ipa = a;
3926         const unsigned long *ipb = b;
3927
3928         if (*ipa > *ipb)
3929                 return 1;
3930         if (*ipa < *ipb)
3931                 return -1;
3932         return 0;
3933 }
3934
3935 static void ftrace_swap_ips(void *a, void *b, int size)
3936 {
3937         unsigned long *ipa = a;
3938         unsigned long *ipb = b;
3939         unsigned long t;
3940
3941         t = *ipa;
3942         *ipa = *ipb;
3943         *ipb = t;
3944 }
3945
3946 static int ftrace_process_locs(struct module *mod,
3947                                unsigned long *start,
3948                                unsigned long *end)
3949 {
3950         struct ftrace_page *start_pg;
3951         struct ftrace_page *pg;
3952         struct dyn_ftrace *rec;
3953         unsigned long count;
3954         unsigned long *p;
3955         unsigned long addr;
3956         unsigned long flags = 0; /* Shut up gcc */
3957         int ret = -ENOMEM;
3958
3959         count = end - start;
3960
3961         if (!count)
3962                 return 0;
3963
3964         sort(start, count, sizeof(*start),
3965              ftrace_cmp_ips, ftrace_swap_ips);
3966
3967         start_pg = ftrace_allocate_pages(count);
3968         if (!start_pg)
3969                 return -ENOMEM;
3970
3971         mutex_lock(&ftrace_lock);
3972
3973         /*
3974          * The core kernel and each module need their own pages, as
3975          * modules will free them when they are removed.
3976          * Force a new page to be allocated for each module.
3977          */
3978         if (!mod) {
3979                 WARN_ON(ftrace_pages || ftrace_pages_start);
3980                 /* First initialization */
3981                 ftrace_pages = ftrace_pages_start = start_pg;
3982         } else {
3983                 if (!ftrace_pages)
3984                         goto out;
3985
3986                 if (WARN_ON(ftrace_pages->next)) {
3987                         /* Hmm, we have free pages? */
3988                         while (ftrace_pages->next)
3989                                 ftrace_pages = ftrace_pages->next;
3990                 }
3991
3992                 ftrace_pages->next = start_pg;
3993         }
3994
3995         p = start;
3996         pg = start_pg;
3997         while (p < end) {
3998                 addr = ftrace_call_adjust(*p++);
3999                 /*
4000                  * Some architecture linkers will pad between
4001                  * the different mcount_loc sections of different
4002                  * object files to satisfy alignments.
4003                  * Skip any NULL pointers.
4004                  */
4005                 if (!addr)
4006                         continue;
4007
4008                 if (pg->index == pg->size) {
4009                         /* We should have allocated enough */
4010                         if (WARN_ON(!pg->next))
4011                                 break;
4012                         pg = pg->next;
4013                 }
4014
4015                 rec = &pg->records[pg->index++];
4016                 rec->ip = addr;
4017         }
4018
4019         /* We should have used all pages */
4020         WARN_ON(pg->next);
4021
4022         /* Assign the last page to ftrace_pages */
4023         ftrace_pages = pg;
4024
4025         /* These new locations need to be initialized */
4026         ftrace_new_pgs = start_pg;
4027
4028         /*
4029          * We only need to disable interrupts on start up
4030          * because we are modifying code that an interrupt
4031          * may execute, and the modification is not atomic.
4032          * But for modules, nothing runs the code we modify
4033          * until we are finished with it, and there's no
4034          * reason to cause large interrupt latencies while we do it.
4035          */
4036         if (!mod)
4037                 local_irq_save(flags);
4038         ftrace_update_code(mod);
4039         if (!mod)
4040                 local_irq_restore(flags);
4041         ret = 0;
4042  out:
4043         mutex_unlock(&ftrace_lock);
4044
4045         return ret;
4046 }
4047
4048 #ifdef CONFIG_MODULES
4049
4050 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4051
4052 void ftrace_release_mod(struct module *mod)
4053 {
4054         struct dyn_ftrace *rec;
4055         struct ftrace_page **last_pg;
4056         struct ftrace_page *pg;
4057         int order;
4058
4059         mutex_lock(&ftrace_lock);
4060
4061         if (ftrace_disabled)
4062                 goto out_unlock;
4063
4064         /*
4065          * Each module has its own ftrace_pages; remove
4066          * them from the list.
4067          */
4068         last_pg = &ftrace_pages_start;
4069         for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4070                 rec = &pg->records[0];
4071                 if (within_module_core(rec->ip, mod)) {
4072                         /*
4073                          * As core pages are first, the first
4074                          * page should never be a module page.
4075                          */
4076                         if (WARN_ON(pg == ftrace_pages_start))
4077                                 goto out_unlock;
4078
4079                         /* Check if we are deleting the last page */
4080                         if (pg == ftrace_pages)
4081                                 ftrace_pages = next_to_ftrace_page(last_pg);
4082
4083                         *last_pg = pg->next;
4084                         order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4085                         free_pages((unsigned long)pg->records, order);
4086                         kfree(pg);
4087                 } else
4088                         last_pg = &pg->next;
4089         }
4090  out_unlock:
4091         mutex_unlock(&ftrace_lock);
4092 }
4093
4094 static void ftrace_init_module(struct module *mod,
4095                                unsigned long *start, unsigned long *end)
4096 {
4097         if (ftrace_disabled || start == end)
4098                 return;
4099         ftrace_process_locs(mod, start, end);
4100 }
4101
4102 static int ftrace_module_notify_enter(struct notifier_block *self,
4103                                       unsigned long val, void *data)
4104 {
4105         struct module *mod = data;
4106
4107         if (val == MODULE_STATE_COMING)
4108                 ftrace_init_module(mod, mod->ftrace_callsites,
4109                                    mod->ftrace_callsites +
4110                                    mod->num_ftrace_callsites);
4111         return 0;
4112 }
4113
4114 static int ftrace_module_notify_exit(struct notifier_block *self,
4115                                      unsigned long val, void *data)
4116 {
4117         struct module *mod = data;
4118
4119         if (val == MODULE_STATE_GOING)
4120                 ftrace_release_mod(mod);
4121
4122         return 0;
4123 }
4124 #else
4125 static int ftrace_module_notify_enter(struct notifier_block *self,
4126                                       unsigned long val, void *data)
4127 {
4128         return 0;
4129 }
4130 static int ftrace_module_notify_exit(struct notifier_block *self,
4131                                      unsigned long val, void *data)
4132 {
4133         return 0;
4134 }
4135 #endif /* CONFIG_MODULES */
4136
4137 struct notifier_block ftrace_module_enter_nb = {
4138         .notifier_call = ftrace_module_notify_enter,
4139         .priority = INT_MAX,    /* Run before anything that can use kprobes */
4140 };
4141
4142 struct notifier_block ftrace_module_exit_nb = {
4143         .notifier_call = ftrace_module_notify_exit,
4144         .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4145 };
4146
4147 extern unsigned long __start_mcount_loc[];
4148 extern unsigned long __stop_mcount_loc[];
4149
4150 void __init ftrace_init(void)
4151 {
4152         unsigned long count, addr, flags;
4153         int ret;
4154
4155         /* Keep the ftrace pointer to the stub */
4156         addr = (unsigned long)ftrace_stub;
4157
4158         local_irq_save(flags);
4159         ftrace_dyn_arch_init(&addr);
4160         local_irq_restore(flags);
4161
4162         /* ftrace_dyn_arch_init places the return code in addr */
4163         if (addr)
4164                 goto failed;
4165
4166         count = __stop_mcount_loc - __start_mcount_loc;
4167
4168         ret = ftrace_dyn_table_alloc(count);
4169         if (ret)
4170                 goto failed;
4171
4172         last_ftrace_enabled = ftrace_enabled = 1;
4173
4174         ret = ftrace_process_locs(NULL,
4175                                   __start_mcount_loc,
4176                                   __stop_mcount_loc);
4177
4178         ret = register_module_notifier(&ftrace_module_enter_nb);
4179         if (ret)
4180                 pr_warning("Failed to register trace ftrace module enter notifier\n");
4181
4182         ret = register_module_notifier(&ftrace_module_exit_nb);
4183         if (ret)
4184                 pr_warning("Failed to register trace ftrace module exit notifier\n");
4185
4186         set_ftrace_early_filters();
4187
4188         return;
4189  failed:
4190         ftrace_disabled = 1;
4191 }
4192
4193 #else
4194
4195 static struct ftrace_ops global_ops = {
4196         .func                   = ftrace_stub,
4197         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4198         INIT_REGEX_LOCK(global_ops)
4199 };
4200
4201 static int __init ftrace_nodyn_init(void)
4202 {
4203         ftrace_enabled = 1;
4204         return 0;
4205 }
4206 core_initcall(ftrace_nodyn_init);
4207
4208 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4209 static inline void ftrace_startup_enable(int command) { }
4210 /* Keep as macros so we do not need to define the commands */
4211 # define ftrace_startup(ops, command)                   \
4212         ({                                              \
4213                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
4214                 0;                                      \
4215         })
4216 # define ftrace_shutdown(ops, command)  do { } while (0)
4217 # define ftrace_startup_sysctl()        do { } while (0)
4218 # define ftrace_shutdown_sysctl()       do { } while (0)
4219
4220 static inline int
4221 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
4222 {
4223         return 1;
4224 }
4225
4226 #endif /* CONFIG_DYNAMIC_FTRACE */
4227
4228 static void
4229 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4230                         struct ftrace_ops *op, struct pt_regs *regs)
4231 {
4232         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4233                 return;
4234
4235         /*
4236          * Some of the ops may be dynamically allocated,
4237          * they must be freed after a synchronize_sched().
4238          */
4239         preempt_disable_notrace();
4240         trace_recursion_set(TRACE_CONTROL_BIT);
4241         do_for_each_ftrace_op(op, ftrace_control_list) {
4242                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4243                     !ftrace_function_local_disabled(op) &&
4244                     ftrace_ops_test(op, ip))
4245                         op->func(ip, parent_ip, op, regs);
4246         } while_for_each_ftrace_op(op);
4247         trace_recursion_clear(TRACE_CONTROL_BIT);
4248         preempt_enable_notrace();
4249 }
4250
4251 static struct ftrace_ops control_ops = {
4252         .func   = ftrace_ops_control_func,
4253         .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4254         INIT_REGEX_LOCK(control_ops)
4255 };
4256
4257 static inline void
4258 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4259                        struct ftrace_ops *ignored, struct pt_regs *regs)
4260 {
4261         struct ftrace_ops *op;
4262         int bit;
4263
4264         if (function_trace_stop)
4265                 return;
4266
4267         bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4268         if (bit < 0)
4269                 return;
4270
4271         /*
4272          * Some of the ops may be dynamically allocated,
4273          * they must be freed after a synchronize_sched().
4274          */
4275         preempt_disable_notrace();
4276         do_for_each_ftrace_op(op, ftrace_ops_list) {
4277                 if (ftrace_ops_test(op, ip))
4278                         op->func(ip, parent_ip, op, regs);
4279         } while_for_each_ftrace_op(op);
4280         preempt_enable_notrace();
4281         trace_clear_recursion(bit);
4282 }
4283
4284 /*
4285  * Some archs only support passing ip and parent_ip. Even though
4286  * the list function ignores the op parameter, we do not want any
4287  * C side effects, where a function is called without the caller
4288  * passing a third parameter.
4289  * Archs should support both the regs and ftrace_ops parameters at the
4290  * same time. If they support ftrace_ops, it is assumed they support regs.
4291  * If callbacks want to use regs, they must either check for regs being
4292  * NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4293  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full set of regs to be
4294  * saved. An architecture can pass partial regs with ftrace_ops and still
4295  * set ARCH_SUPPORTS_FTRACE_OPS.
4296  */
4297 #if ARCH_SUPPORTS_FTRACE_OPS
4298 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4299                                  struct ftrace_ops *op, struct pt_regs *regs)
4300 {
4301         __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4302 }
4303 #else
4304 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4305 {
4306         __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4307 }
4308 #endif
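
/*
 * Illustrative callback sketch tying the comment above together (names
 * hypothetical): a callback that wants pt_regs must tolerate a NULL regs
 * pointer unless it registers with FTRACE_OPS_FL_SAVE_REGS on a kernel
 * built with CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 *
 *	static void my_regs_callback(unsigned long ip, unsigned long pip,
 *				     struct ftrace_ops *op,
 *				     struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;		(the arch did not save regs)
 *		...
 *	}
 */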
4309
4310 static void clear_ftrace_swapper(void)
4311 {
4312         struct task_struct *p;
4313         int cpu;
4314
4315         get_online_cpus();
4316         for_each_online_cpu(cpu) {
4317                 p = idle_task(cpu);
4318                 clear_tsk_trace_trace(p);
4319         }
4320         put_online_cpus();
4321 }
4322
4323 static void set_ftrace_swapper(void)
4324 {
4325         struct task_struct *p;
4326         int cpu;
4327
4328         get_online_cpus();
4329         for_each_online_cpu(cpu) {
4330                 p = idle_task(cpu);
4331                 set_tsk_trace_trace(p);
4332         }
4333         put_online_cpus();
4334 }
4335
4336 static void clear_ftrace_pid(struct pid *pid)
4337 {
4338         struct task_struct *p;
4339
4340         rcu_read_lock();
4341         do_each_pid_task(pid, PIDTYPE_PID, p) {
4342                 clear_tsk_trace_trace(p);
4343         } while_each_pid_task(pid, PIDTYPE_PID, p);
4344         rcu_read_unlock();
4345
4346         put_pid(pid);
4347 }
4348
4349 static void set_ftrace_pid(struct pid *pid)
4350 {
4351         struct task_struct *p;
4352
4353         rcu_read_lock();
4354         do_each_pid_task(pid, PIDTYPE_PID, p) {
4355                 set_tsk_trace_trace(p);
4356         } while_each_pid_task(pid, PIDTYPE_PID, p);
4357         rcu_read_unlock();
4358 }
4359
4360 static void clear_ftrace_pid_task(struct pid *pid)
4361 {
4362         if (pid == ftrace_swapper_pid)
4363                 clear_ftrace_swapper();
4364         else
4365                 clear_ftrace_pid(pid);
4366 }
4367
4368 static void set_ftrace_pid_task(struct pid *pid)
4369 {
4370         if (pid == ftrace_swapper_pid)
4371                 set_ftrace_swapper();
4372         else
4373                 set_ftrace_pid(pid);
4374 }
4375
4376 static int ftrace_pid_add(int p)
4377 {
4378         struct pid *pid;
4379         struct ftrace_pid *fpid;
4380         int ret = -EINVAL;
4381
4382         mutex_lock(&ftrace_lock);
4383
4384         if (!p)
4385                 pid = ftrace_swapper_pid;
4386         else
4387                 pid = find_get_pid(p);
4388
4389         if (!pid)
4390                 goto out;
4391
4392         ret = 0;
4393
4394         list_for_each_entry(fpid, &ftrace_pids, list)
4395                 if (fpid->pid == pid)
4396                         goto out_put;
4397
4398         ret = -ENOMEM;
4399
4400         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4401         if (!fpid)
4402                 goto out_put;
4403
4404         list_add(&fpid->list, &ftrace_pids);
4405         fpid->pid = pid;
4406
4407         set_ftrace_pid_task(pid);
4408
4409         ftrace_update_pid_func();
4410         ftrace_startup_enable(0);
4411
4412         mutex_unlock(&ftrace_lock);
4413         return 0;
4414
4415 out_put:
4416         if (pid != ftrace_swapper_pid)
4417                 put_pid(pid);
4418
4419 out:
4420         mutex_unlock(&ftrace_lock);
4421         return ret;
4422 }
4423
4424 static void ftrace_pid_reset(void)
4425 {
4426         struct ftrace_pid *fpid, *safe;
4427
4428         mutex_lock(&ftrace_lock);
4429         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4430                 struct pid *pid = fpid->pid;
4431
4432                 clear_ftrace_pid_task(pid);
4433
4434                 list_del(&fpid->list);
4435                 kfree(fpid);
4436         }
4437
4438         ftrace_update_pid_func();
4439         ftrace_startup_enable(0);
4440
4441         mutex_unlock(&ftrace_lock);
4442 }
4443
4444 static void *fpid_start(struct seq_file *m, loff_t *pos)
4445 {
4446         mutex_lock(&ftrace_lock);
4447
4448         if (list_empty(&ftrace_pids) && (!*pos))
4449                 return (void *) 1;
4450
4451         return seq_list_start(&ftrace_pids, *pos);
4452 }
4453
4454 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4455 {
4456         if (v == (void *)1)
4457                 return NULL;
4458
4459         return seq_list_next(v, &ftrace_pids, pos);
4460 }
4461
4462 static void fpid_stop(struct seq_file *m, void *p)
4463 {
4464         mutex_unlock(&ftrace_lock);
4465 }
4466
4467 static int fpid_show(struct seq_file *m, void *v)
4468 {
4469         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4470
4471         if (v == (void *)1) {
4472                 seq_printf(m, "no pid\n");
4473                 return 0;
4474         }
4475
4476         if (fpid->pid == ftrace_swapper_pid)
4477                 seq_printf(m, "swapper tasks\n");
4478         else
4479                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4480
4481         return 0;
4482 }
4483
4484 static const struct seq_operations ftrace_pid_sops = {
4485         .start = fpid_start,
4486         .next = fpid_next,
4487         .stop = fpid_stop,
4488         .show = fpid_show,
4489 };
4490
4491 static int
4492 ftrace_pid_open(struct inode *inode, struct file *file)
4493 {
4494         int ret = 0;
4495
4496         if ((file->f_mode & FMODE_WRITE) &&
4497             (file->f_flags & O_TRUNC))
4498                 ftrace_pid_reset();
4499
4500         if (file->f_mode & FMODE_READ)
4501                 ret = seq_open(file, &ftrace_pid_sops);
4502
4503         return ret;
4504 }
4505
4506 static ssize_t
4507 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4508                    size_t cnt, loff_t *ppos)
4509 {
4510         char buf[64], *tmp;
4511         long val;
4512         int ret;
4513
4514         if (cnt >= sizeof(buf))
4515                 return -EINVAL;
4516
4517         if (copy_from_user(&buf, ubuf, cnt))
4518                 return -EFAULT;
4519
4520         buf[cnt] = 0;
4521
4522         /*
4523          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4524          * to clear the filter quietly.
4525          */
4526         tmp = strstrip(buf);
4527         if (strlen(tmp) == 0)
4528                 return 1;
4529
4530         ret = kstrtol(tmp, 10, &val);
4531         if (ret < 0)
4532                 return ret;
4533
4534         ret = ftrace_pid_add(val);
4535
4536         return ret ? ret : cnt;
4537 }
4538
4539 static int
4540 ftrace_pid_release(struct inode *inode, struct file *file)
4541 {
4542         if (file->f_mode & FMODE_READ)
4543                 seq_release(inode, file);
4544
4545         return 0;
4546 }
4547
4548 static const struct file_operations ftrace_pid_fops = {
4549         .open           = ftrace_pid_open,
4550         .write          = ftrace_pid_write,
4551         .read           = seq_read,
4552         .llseek         = ftrace_filter_lseek,
4553         .release        = ftrace_pid_release,
4554 };
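
/*
 * Usage sketch for the set_ftrace_pid file backed by these fops
 * (illustrative):
 *
 *	echo 1234 > set_ftrace_pid     trace only pid 1234
 *	echo 0    > set_ftrace_pid     trace the per-cpu swapper tasks
 *	echo      > set_ftrace_pid     clear the pid filter (O_TRUNC reset)
 *
 * ftrace_pid_write() treats an empty write as a quiet clear, and pid 0
 * is mapped to ftrace_swapper_pid in ftrace_pid_add().
 */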
4555
4556 static __init int ftrace_init_debugfs(void)
4557 {
4558         struct dentry *d_tracer;
4559
4560         d_tracer = tracing_init_dentry();
4561         if (!d_tracer)
4562                 return 0;
4563
4564         ftrace_init_dyn_debugfs(d_tracer);
4565
4566         trace_create_file("set_ftrace_pid", 0644, d_tracer,
4567                             NULL, &ftrace_pid_fops);
4568
4569         ftrace_profile_debugfs(d_tracer);
4570
4571         return 0;
4572 }
4573 fs_initcall(ftrace_init_debugfs);
4574
4575 /**
4576  * ftrace_kill - kill ftrace
4577  *
4578  * This function should be used by panic code. It stops ftrace
4579  * but in a not so nice way: nothing is unregistered or freed, tracing
4580  * is simply shut off. It is safe to call from atomic context.
4581  */
4582 void ftrace_kill(void)
4583 {
4584         ftrace_disabled = 1;
4585         ftrace_enabled = 0;
4586         clear_ftrace_function();
4587 }
4588
4589 /**
4590  * ftrace_is_dead - Test if ftrace is dead or not.
4591  */
4592 int ftrace_is_dead(void)
4593 {
4594         return ftrace_disabled;
4595 }
4596
4597 /**
4598  * register_ftrace_function - register a function for profiling
4599  * @ops - ops structure that holds the function for profiling.
4600  *
4601  * Register a function to be called by all functions in the
4602  * kernel.
4603  *
4604  * Note: @ops->func and all the functions it calls must be labeled
4605  *       with "notrace", otherwise it will go into a
4606  *       recursive loop.
4607  */
4608 int register_ftrace_function(struct ftrace_ops *ops)
4609 {
4610         int ret = -1;
4611
4612         ftrace_ops_init(ops);
4613
4614         mutex_lock(&ftrace_lock);
4615
4616         ret = __register_ftrace_function(ops);
4617         if (!ret)
4618                 ret = ftrace_startup(ops, 0);
4619
4620         mutex_unlock(&ftrace_lock);
4621
4622         return ret;
4623 }
4624 EXPORT_SYMBOL_GPL(register_ftrace_function);
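
/*
 * Minimal registration sketch (illustrative; names are hypothetical):
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		(must be notrace, as must everything it calls)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */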
4625
4626 /**
4627  * unregister_ftrace_function - unregister a function for profiling.
4628  * @ops - ops structure that holds the function to unregister
4629  *
4630  * Unregister a function that was added to be called by ftrace profiling.
4631  */
4632 int unregister_ftrace_function(struct ftrace_ops *ops)
4633 {
4634         int ret;
4635
4636         mutex_lock(&ftrace_lock);
4637         ret = __unregister_ftrace_function(ops);
4638         if (!ret)
4639                 ftrace_shutdown(ops, 0);
4640         mutex_unlock(&ftrace_lock);
4641
4642         return ret;
4643 }
4644 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4645
4646 int
4647 ftrace_enable_sysctl(struct ctl_table *table, int write,
4648                      void __user *buffer, size_t *lenp,
4649                      loff_t *ppos)
4650 {
4651         int ret = -ENODEV;
4652
4653         mutex_lock(&ftrace_lock);
4654
4655         if (unlikely(ftrace_disabled))
4656                 goto out;
4657
4658         ret = proc_dointvec(table, write, buffer, lenp, ppos);
4659
4660         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4661                 goto out;
4662
4663         last_ftrace_enabled = !!ftrace_enabled;
4664
4665         if (ftrace_enabled) {
4666
4667                 ftrace_startup_sysctl();
4668
4669                 /* we are starting ftrace again */
4670                 if (ftrace_ops_list != &ftrace_list_end)
4671                         update_ftrace_function();
4672
4673         } else {
4674                 /* stopping ftrace calls (just send to ftrace_stub) */
4675                 ftrace_trace_function = ftrace_stub;
4676
4677                 ftrace_shutdown_sysctl();
4678         }
4679
4680  out:
4681         mutex_unlock(&ftrace_lock);
4682         return ret;
4683 }
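
/*
 * This handler sits behind the kernel.ftrace_enabled sysctl. Illustrative
 * usage from userspace:
 *
 *	sysctl kernel.ftrace_enabled=0
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 *
 * Flipping it to 0 points ftrace_trace_function at ftrace_stub; flipping
 * it back re-runs update_ftrace_function() if any ops are registered.
 */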
4684
4685 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4686
4687 static int ftrace_graph_active;
4688 static struct notifier_block ftrace_suspend_notifier;
4689
4690 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4691 {
4692         return 0;
4693 }
4694
4695 /* The callbacks that hook a function */
4696 trace_func_graph_ret_t ftrace_graph_return =
4697                         (trace_func_graph_ret_t)ftrace_stub;
4698 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4699
4700 /* Try to assign a return stack array to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4701 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4702 {
4703         int i;
4704         int ret = 0;
4705         unsigned long flags;
4706         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4707         struct task_struct *g, *t;
4708
4709         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4710                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4711                                         * sizeof(struct ftrace_ret_stack),
4712                                         GFP_KERNEL);
4713                 if (!ret_stack_list[i]) {
4714                         start = 0;
4715                         end = i;
4716                         ret = -ENOMEM;
4717                         goto free;
4718                 }
4719         }
4720
4721         read_lock_irqsave(&tasklist_lock, flags);
4722         do_each_thread(g, t) {
4723                 if (start == end) {
4724                         ret = -EAGAIN;
4725                         goto unlock;
4726                 }
4727
4728                 if (t->ret_stack == NULL) {
4729                         atomic_set(&t->tracing_graph_pause, 0);
4730                         atomic_set(&t->trace_overrun, 0);
4731                         t->curr_ret_stack = -1;
4732                         /* Make sure the tasks see the -1 first: */
4733                         smp_wmb();
4734                         t->ret_stack = ret_stack_list[start++];
4735                 }
4736         } while_each_thread(g, t);
4737
4738 unlock:
4739         read_unlock_irqrestore(&tasklist_lock, flags);
4740 free:
4741         for (i = start; i < end; i++)
4742                 kfree(ret_stack_list[i]);
4743         return ret;
4744 }
4745
4746 static void
4747 ftrace_graph_probe_sched_switch(void *ignore,
4748                         struct task_struct *prev, struct task_struct *next)
4749 {
4750         unsigned long long timestamp;
4751         int index;
4752
4753         /*
4754          * Does the user want to count the time a function was asleep?
4755          * If so, do not update the time stamps.
4756          */
4757         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4758                 return;
4759
4760         timestamp = trace_clock_local();
4761
4762         prev->ftrace_timestamp = timestamp;
4763
4764         /* only process tasks that we timestamped */
4765         if (!next->ftrace_timestamp)
4766                 return;
4767
4768         /*
4769          * Update all the counters in next to make up for the
4770          * time next was sleeping.
4771          */
4772         timestamp -= next->ftrace_timestamp;
4773
4774         for (index = next->curr_ret_stack; index >= 0; index--)
4775                 next->ret_stack[index].calltime += timestamp;
4776 }
4777
4778 /* Allocate a return stack for each task */
4779 static int start_graph_tracing(void)
4780 {
4781         struct ftrace_ret_stack **ret_stack_list;
4782         int ret, cpu;
4783
4784         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4785                                 sizeof(struct ftrace_ret_stack *),
4786                                 GFP_KERNEL);
4787
4788         if (!ret_stack_list)
4789                 return -ENOMEM;
4790
4791         /* The cpu_boot init_task->ret_stack will never be freed */
4792         for_each_online_cpu(cpu) {
4793                 if (!idle_task(cpu)->ret_stack)
4794                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4795         }
4796
4797         do {
4798                 ret = alloc_retstack_tasklist(ret_stack_list);
4799         } while (ret == -EAGAIN);
4800
4801         if (!ret) {
4802                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4803                 if (ret)
4804                         pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
4806         }
4807
4808         kfree(ret_stack_list);
4809         return ret;
4810 }
4811
4812 /*
4813  * Hibernation protection.
4814  * The state of the current task is too unstable during
4815  * suspend/restore to disk. We want to protect against that.
4816  */
4817 static int
4818 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4819                                                         void *unused)
4820 {
4821         switch (state) {
4822         case PM_HIBERNATION_PREPARE:
4823                 pause_graph_tracing();
4824                 break;
4825
4826         case PM_POST_HIBERNATION:
4827                 unpause_graph_tracing();
4828                 break;
4829         }
4830         return NOTIFY_DONE;
4831 }
4832
4833 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4834                         trace_func_graph_ent_t entryfunc)
4835 {
4836         int ret = 0;
4837
4838         mutex_lock(&ftrace_lock);
4839
4840         /* we currently allow only one tracer registered at a time */
4841         if (ftrace_graph_active) {
4842                 ret = -EBUSY;
4843                 goto out;
4844         }
4845
4846         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4847         register_pm_notifier(&ftrace_suspend_notifier);
4848
4849         ftrace_graph_active++;
4850         ret = start_graph_tracing();
4851         if (ret) {
4852                 ftrace_graph_active--;
4853                 goto out;
4854         }
4855
4856         ftrace_graph_return = retfunc;
4857         ftrace_graph_entry = entryfunc;
4858
4859         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4860
4861 out:
4862         mutex_unlock(&ftrace_lock);
4863         return ret;
4864 }
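
/*
 * Registration sketch (illustrative; my_entry/my_return are hypothetical,
 * the real user is the function_graph tracer in trace_functions_graph.c):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(nonzero: trace this function's return)
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	ret = register_ftrace_graph(my_return, my_entry);
 */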
4865
4866 void unregister_ftrace_graph(void)
4867 {
4868         mutex_lock(&ftrace_lock);
4869
4870         if (unlikely(!ftrace_graph_active))
4871                 goto out;
4872
4873         ftrace_graph_active--;
4874         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4875         ftrace_graph_entry = ftrace_graph_entry_stub;
4876         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4877         unregister_pm_notifier(&ftrace_suspend_notifier);
4878         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4879
4880  out:
4881         mutex_unlock(&ftrace_lock);
4882 }
4883
4884 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4885
4886 static void
4887 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4888 {
4889         atomic_set(&t->tracing_graph_pause, 0);
4890         atomic_set(&t->trace_overrun, 0);
4891         t->ftrace_timestamp = 0;
4892         /* make curr_ret_stack visible before we add the ret_stack */
4893         smp_wmb();
4894         t->ret_stack = ret_stack;
4895 }
4896
4897 /*
4898  * Allocate a return stack for the idle task. This may be the first
4899  * time through, or it may be called again when a CPU is brought online.
4900  */
4901 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4902 {
4903         t->curr_ret_stack = -1;
4904         /*
4905          * The idle task has no parent; it either has its own
4906          * stack or no stack at all.
4907          */
4908         if (t->ret_stack)
4909                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4910
4911         if (ftrace_graph_active) {
4912                 struct ftrace_ret_stack *ret_stack;
4913
4914                 ret_stack = per_cpu(idle_ret_stack, cpu);
4915                 if (!ret_stack) {
4916                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4917                                             * sizeof(struct ftrace_ret_stack),
4918                                             GFP_KERNEL);
4919                         if (!ret_stack)
4920                                 return;
4921                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4922                 }
4923                 graph_init_task(t, ret_stack);
4924         }
4925 }
4926
4927 /* Allocate a return stack for newly created task */
4928 void ftrace_graph_init_task(struct task_struct *t)
4929 {
4930         /* Make sure we do not use the parent ret_stack */
4931         t->ret_stack = NULL;
4932         t->curr_ret_stack = -1;
4933
4934         if (ftrace_graph_active) {
4935                 struct ftrace_ret_stack *ret_stack;
4936
4937                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4938                                 * sizeof(struct ftrace_ret_stack),
4939                                 GFP_KERNEL);
4940                 if (!ret_stack)
4941                         return;
4942                 graph_init_task(t, ret_stack);
4943         }
4944 }
4945
4946 void ftrace_graph_exit_task(struct task_struct *t)
4947 {
4948         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4949
4950         t->ret_stack = NULL;
4951         /* NULL must become visible to IRQs before we free it: */
4952         barrier();
4953
4954         kfree(ret_stack);
4955 }
4956
4957 void ftrace_graph_stop(void)
4958 {
4959         ftrace_stop();
4960 }
4961 #endif