/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}
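
/*
 * Note: ops->private set above is how the callbacks get back to their
 * trace_array; function_trace_call() and function_stack_trace_call()
 * below read it via op->private.
 */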

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}
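
/*
 * For reference, these per-instance files appear when a tracing instance
 * is created (assuming the usual tracefs mount point):
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * which gives the new instance its own set_ftrace_filter and
 * set_ftrace_notrace files, backed by the ops allocated above.
 */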

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;

        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
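
/*
 * Note the two protection schemes above: function_trace_call() only
 * disables preemption and relies on the per-context recursion flags,
 * while function_stack_trace_call() disables interrupts outright and
 * serializes through the per-cpu "disabled" counter, presumably because
 * the stack unwind itself can recurse into traced functions before the
 * usual recursion protection is in place.
 */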

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
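
/*
 * For reference, the stack-trace option above is toggled from user space
 * while the function tracer is active (assuming the usual tracefs mount
 * point):
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *   echo 0 > /sys/kernel/debug/tracing/options/func_stack_trace
 *
 * Writes land in func_set_flag() below, which swaps tr->ops->func
 * between the two callbacks.
 */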

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}
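
/*
 * In both helpers above, function_enabled is cleared before the ftrace
 * registration is changed; the callbacks test it first and bail out,
 * which presumably keeps events out of the buffer while the ops are in
 * a half-registered state.
 */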

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops->func = function_stack_trace_call;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops->func = function_trace_call;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};
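
/*
 * For reference, the tracer registered above is driven entirely from
 * tracefs (again assuming the usual mount point):
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 *   echo nop > /sys/kernel/debug/tracing/current_tracer
 *
 * Selecting "function" ends up in function_trace_init() above, and
 * switching back to "nop" ends up in function_trace_reset().
 */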

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(void **data, bool on)
{
        long *count = (long *)data;
        long old_count = *count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or
         * enabled), then a write memory barrier is used to make sure the
         * new state is visible before the counter is updated to one less
         * than its old value. This guarantees that another CPU executing
         * this code will see the new state before seeing the new counter
         * value, and will do nothing if it has already seen the new
         * counter.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        if (!old_count)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracing_is_on())
                return;

        if (on)
                tracing_on();
        else
                tracing_off();

        /* unlimited? */
        if (old_count == -1)
                return;

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}
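
/*
 * Example usage (hypothetical target function, usual tracefs mount):
 *
 *   echo 'schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo 'schedule:traceon:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first disables the ring buffer every time schedule() is hit; the
 * second re-enables it, with the ":5" count limiting how many times the
 * toggle may fire.
 */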

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;
        long old_count;
        long new_count;

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {

                if (!tracing_is_on())
                        return;

                old_count = *count;

                if (!old_count)
                        return;

                /* unlimited? */
                if (old_count == -1) {
                        trace_dump_stack(STACK_SKIP);
                        return;
                }

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_dump_stack(STACK_SKIP);

        } while (new_count != old_count);
}
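
/*
 * Note: the cmpxchg() loop above keeps the ":count" bookkeeping exact
 * even when several CPUs hit the probe at once: only the CPU that wins
 * the compare-and-exchange emits the stack dump for that tick of the
 * counter.
 */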

static int update_count(void **data)
{
        unsigned long *count = (unsigned long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}
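
/*
 * Unlike the cmpxchg() loop in ftrace_stacktrace_count(), the decrement
 * above is not atomic; presumably an occasional extra dump from a
 * concurrent hit is acceptable for the dump probes below.
 */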

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_puts(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}
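
/*
 * The helper above parses the common probe syntax written to
 * set_ftrace_filter, roughly:
 *
 *   <glob>:<command>[:<count>]      arm a probe (no count = unlimited)
 *   !<glob>:<command>               disarm it again
 *
 * e.g. 'schedule:stacktrace:3' (hypothetical target) arms the stacktrace
 * probe for three hits.
 */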

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}
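
/*
 * Example usage (hypothetical target function, usual tracefs mount):
 *
 *   echo 'vfs_read:dump' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo 'vfs_read:cpudump' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * Both callbacks force a count of "1", so the dump fires only once;
 * cpudump restricts the dump to the buffer of the CPU that hit the
 * probe.
 */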

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);