kernel/trace/trace_functions.c (firefly-linux-kernel-4.4.55.git)
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int                      ftrace_function_enabled;

static struct trace_array       *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

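/*
 * Set up the "function" tracer: remember the trace_array, start
 * recording task comms (cmdlines) and register the function callback.
 */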
static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}

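/* Tear down the tracer: unregister the callback, stop cmdline recording. */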
static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

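/* The tracer's ->start callback: reset the per-CPU ring buffers. */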
static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};

static struct tracer_flags func_flags;

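/*
 * Callback invoked for every traced function entry.  Preemption is
 * disabled, the per-context recursion flag is taken, and the function
 * is recorded unless the per-CPU buffer has been disabled.
 */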
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

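/*
 * Callback used when the func_stack_trace option is set: record the
 * function entry and then a stack trace.  Interrupts are disabled and
 * the per-CPU "disabled" counter guards against recursion.
 */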
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

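/*
 * Register the plain or stack-tracing callback, depending on whether
 * the func_stack_trace option is set.
 */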
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}

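/*
 * Toggle the func_stack_trace option at run time by swapping which
 * ftrace_ops is registered.
 */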
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __read_mostly =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .wait_pipe      = poll_wait_pipe,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
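/*
 * The probe's data pointer doubles as a countdown: 0 means the probe
 * is used up, -1 means unlimited.  Returns 1 if the probe should fire.
 */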
static int update_count(void **data)
{
        unsigned long *count = (long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}

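/*
 * Probe callbacks for the traceon/traceoff commands in set_ftrace_filter;
 * the *_count variants honour an optional ":count" limit.
 */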
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        if (update_count(data))
                tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                trace_dump_stack(STACK_SKIP);
}

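/*
 * Common seq_file helper for the probe ->print callbacks: show
 * "function:command" followed by ":unlimited" or the remaining count.
 */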
static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

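/*
 * Common handling for the set_ftrace_filter commands: a leading '!'
 * removes the probe, an optional ":count" parameter limits how many
 * times it fires, otherwise the probe is registered as unlimited.
 */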
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

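/*
 * Register the traceon, traceoff and stacktrace commands; on failure,
 * unregister whatever was already registered.
 */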
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret) {
                unregister_ftrace_command(&ftrace_traceoff_cmd);
                unregister_ftrace_command(&ftrace_traceon_cmd);
        }
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

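/* Register the filter commands and the "function" tracer itself at boot. */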
static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);