/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <trace/events/sched.h>

#include "trace.h"
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
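/*
 * Note: a raw arch_spinlock_t is used rather than a normal spinlock.
 * These probes run from inside the scheduler and the tracer itself, so
 * a traced, lockdep-instrumented lock here could recurse back into the
 * tracing code. Callers must disable interrupts around the lock
 * themselves (see wakeup_reset() and the probes below).
 */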
static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;

#define TRACE_DISPLAY_GRAPH	1
static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	/* nothing to do unless a wakeup is being traced */
	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	/* only trace on the CPU the woken task currently runs on */
	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
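/*
 * Typical caller pattern (see wakeup_tracer_call() and the graph
 * entry/return handlers below): on success the caller records its
 * event and then undoes both effects of the prologue:
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	... record the event ...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */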
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
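/*
 * FTRACE_OPS_FL_RECURSION_SAFE advertises that this callback provides
 * its own recursion protection (the per-cpu ->disabled counter taken in
 * func_prolog_preempt_disable()), so the ftrace core does not need to
 * wrap it in an additional recursion guard.
 */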
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(&trace_ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_wakeup_function(int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(&trace_ops);

	function_enabled = false;
}
static void wakeup_function_set(int set)
{
	if (set)
		register_wakeup_function(is_graph(), 1);
	else
		unregister_wakeup_function(is_graph());
}
static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	if (mask & TRACE_ITER_FUNCTION)
		wakeup_function_set(set);

	return trace_keep_overwrite(tracer, mask, set);
}
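/*
 * trace_keep_overwrite() rejects attempts to clear TRACE_ITER_OVERWRITE
 * while this tracer is active; the latency tracers depend on ring-buffer
 * overwrite mode (see the comment in __wakeup_tracer_init()).
 */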
static int start_func_tracer(int graph)
{
	int ret;

	ret = register_wakeup_function(graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}
static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(graph);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	/* nothing to do if the mode is unchanged */
	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(!set);

	wakeup_reset(wakeup_trace);
	tracing_max_latency = 0;

	return start_func_tracer(set);
}
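/*
 * Toggling the display-graph option therefore tears down the currently
 * registered flavor (function or function-graph), clears the trace and
 * the recorded maximum latency, and starts the other flavor.
 */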
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}
static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
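/*
 * In other words: when tracing_thresh is set, every latency at or above
 * the threshold is reported; when it is unset, only latencies that
 * exceed the maximum seen so far (tracing_max_latency) are reported.
 */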
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 * (pairs with the smp_wmb() in start_wakeup_tracer())
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
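/*
 * The reported latency is delta = T1 - T0: the time between
 * data->preempt_timestamp, taken in probe_wakeup() when the task was
 * woken, and the timestamp taken here when the task is finally
 * scheduled in.
 */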
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
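/*
 * Note that only one wakeup is tracked at a time: a candidate task must
 * have a higher priority than both the current task and any wakeup
 * already being traced (the p->prio >= wakeup_prio checks above bail
 * out otherwise). Tracking a new task starts with __wakeup_reset(),
 * which drops the reference on the previous wakeup_task.
 */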
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}
static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};
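/*
 * Typical usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo wakeup > /sys/kernel/debug/tracing/current_tracer
 *	  (or wakeup_rt to consider only RT tasks)
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 */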
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);