kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_FLAT		0x80

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Use standard trace formatting rather than hierarchical */
	{ TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

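/*
 * Example usage, assuming debugfs is mounted at /sys/kernel/debug
 * (paths are illustrative):
 *
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *   echo 5              > /sys/kernel/debug/tracing/max_graph_depth
 *   cat /sys/kernel/debug/tracing/trace
 *
 * max_graph_depth is created by init_graph_debugfs() below; the
 * funcgraph-* options mirror the tracer_opt table above.
 */
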
static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	DURATION_FILL_FULL  = -1,
	DURATION_FILL_START = -2,
	DURATION_FILL_END   = -3,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 optimized for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

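/*
 * Write a function-entry event into the ring buffer. Returns 0 if
 * tracing is disabled on this CPU or the buffer reservation fails,
 * 1 once the event has been handed to the buffer (the event filter
 * may still discard it before it is committed).
 */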
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

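/*
 * Entry callback registered with the function graph infrastructure.
 * Applies the task, depth, and irq filters, then records the entry
 * event with interrupts disabled and per-CPU recursion protection.
 */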
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

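/*
 * When tracing_thresh is set we only care about returns that exceed
 * the threshold, so claim every entry as traced without writing an
 * entry event; trace_graph_thresh_return() does the filtering.
 */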
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		__buffer_unlock_commit(buffer, event);
}

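/*
 * Return callback registered with the function graph infrastructure:
 * records the function-exit event with interrupts disabled and the
 * same per-CPU recursion protection as trace_graph_entry().
 */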
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

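/*
 * Register the entry/return callbacks (the threshold variants when
 * tracing_thresh is set) and start recording cmdlines so that pids
 * can be resolved to task names in the output.
 */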
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

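/*
 * Print "comm-pid", centered within TRACE_GRAPH_PROCINFO_LENGTH
 * columns; the task name is truncated so the field stays compact.
 */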
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

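/*
 * Peek at the event following @curr. If it is the matching return
 * event for the same function and pid, @curr is a leaf call: return
 * the ret entry (consuming it) so the pair can be printed on one
 * line as "func();". Returns NULL otherwise.
 */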
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* This is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

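/* Print the timestamp as "seconds.microseconds |  " */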
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

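/*
 * If @addr lies in the irq entry text section, draw an irq marker
 * ("==========>" on entry, "<==========" on return) with whichever
 * context columns are enabled, so interrupts stand out in the graph.
 */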
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_printf(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(DURATION_FILL_START, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(DURATION_FILL_END, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

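/*
 * Print a nanosecond duration as microseconds with up to three
 * fractional digits, padded to a fixed-width column, e.g. "1.026 us".
 */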
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

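/*
 * Print the duration column: either one of the DURATION_FILL_*
 * sentinels (blank filler), or an overhead marker ('+' above 10 us,
 * '!' above 100 us) followed by the formatted duration.
 */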
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
			return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_printf(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_printf(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_printf(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_printf(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_printf(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_printf(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

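/*
 * Print the per-line prefix: pid-change notice, irq markers, and
 * whichever of the absolute time, CPU, proc, and latency columns
 * are enabled.
 */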
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

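/*
 * Print a function-entry event, either as a one-line leaf (when the
 * matching return immediately follows) or as an opening "func() {".
 * Remembers a failed write so the entry can be replayed later.
 */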
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

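/*
 * Print a function-return event as a closing brace at the right
 * depth. If the matching entry was lost, append the function name
 * as a comment so the reader knows which call the brace closes.
 */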
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

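/*
 * Print a non-graph event (trace_printk and friends) as a C-style
 * comment, indented one level deeper than the current call depth.
 */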
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

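/*
 * Main print_line entry point: dispatch each buffered event to the
 * matching printer, replaying a previously failed entry first, and
 * defer to the standard output path when funcgraph-flat is set.
 */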
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (flags & TRACE_GRAPH_PRINT_FLAT)
		return TRACE_TYPE_UNHANDLED;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* Don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

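/*
 * Print the latency-format legend, shifted right past whichever of
 * the TIME, CPU, and TASK/PID columns are enabled.
 */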
static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "						/* 4 spaces */
		"                 ";				/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (flags & TRACE_GRAPH_PRINT_FLAT) {
		trace_default_header(s);
		return;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

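/* Keep ftrace_graph_skip_irqs in sync with the funcgraph-irqs option */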
static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

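/*
 * Read/write handlers for the "max_graph_depth" debugfs file created
 * below; a depth of 0 means no limit.
 */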
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);