3211890ee7d53a4a1b1eb16387562b38c8e02ed2
[firefly-linux-kernel-4.4.55.git] / include / trace / events / sched.h
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM sched
3
4 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_SCHED_H
6
7 #include <linux/sched.h>
8 #include <linux/tracepoint.h>
9 #include <linux/binfmts.h>
10
11 /*
12  * Tracepoint for calling kthread_stop, performed to end a kthread:
13  */
14 TRACE_EVENT(sched_kthread_stop,
15
16         TP_PROTO(struct task_struct *t),
17
18         TP_ARGS(t),
19
20         TP_STRUCT__entry(
                /* name and pid of the kthread being asked to stop */
21                 __array(        char,   comm,   TASK_COMM_LEN   )
22                 __field(        pid_t,  pid                     )
23         ),
24
25         TP_fast_assign(
                /* snapshot the name into the event record at trace time */
26                 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
27                 __entry->pid    = t->pid;
28         ),
29
30         TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
31 );
32
33 /*
34  * Tracepoint for the return value of the kthread stopping:
35  */
36 TRACE_EVENT(sched_kthread_stop_ret,
37
38         TP_PROTO(int ret),
39
40         TP_ARGS(ret),
41
42         TP_STRUCT__entry(
                /* value the stopped kthread handed back (see comment above) */
43                 __field(        int,    ret     )
44         ),
45
46         TP_fast_assign(
47                 __entry->ret    = ret;
48         ),
49
50         TP_printk("ret=%d", __entry->ret)
51 );
52
53 /*
54  * Tracepoint for waking up a task:
55  */
56 DECLARE_EVENT_CLASS(sched_wakeup_template,
57
58         TP_PROTO(struct task_struct *p),
59
        /* NOTE: __perf_task() marks p as the task of interest for perf — confirm */
60         TP_ARGS(__perf_task(p)),
61
62         TP_STRUCT__entry(
63                 __array(        char,   comm,   TASK_COMM_LEN   )
64                 __field(        pid_t,  pid                     )
65                 __field(        int,    prio                    )
66                 __field(        int,    success                 )
67                 __field(        int,    target_cpu              )
68         ),
69
70         TP_fast_assign(
71                 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
72                 __entry->pid            = p->pid;
73                 __entry->prio           = p->prio;
74                 __entry->success        = 1; /* rudiment, kill when possible */
                /* CPU the task is queued on at the time of the wakeup */
75                 __entry->target_cpu     = task_cpu(p);
76         ),
77
        /* 'success' is recorded but deliberately not printed (legacy field) */
78         TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
79                   __entry->comm, __entry->pid, __entry->prio,
80                   __entry->target_cpu)
81 );
82
83 /*
84  * Tracepoint called when waking a task; this tracepoint is guaranteed to be
85  * called from the waking context.
86  */
/* Records the comm/pid/prio/target_cpu fields of sched_wakeup_template. */
87 DEFINE_EVENT(sched_wakeup_template, sched_waking,
88              TP_PROTO(struct task_struct *p),
89              TP_ARGS(p));
90
91 /*
92  * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
93  * It is not always called from the waking context.
94  */
95 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
96              TP_PROTO(struct task_struct *p),
97              TP_ARGS(p));
98
99 /*
100  * Tracepoint for waking up a new task (a task that has just been created):
101  */
102 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
103              TP_PROTO(struct task_struct *p),
104              TP_ARGS(p));
105
106 #ifdef CREATE_TRACE_POINTS
107 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
108 {
109 #ifdef CONFIG_SCHED_DEBUG
110         BUG_ON(p != current);
111 #endif /* CONFIG_SCHED_DEBUG */
112
113         /*
114          * Preemption ignores task state, therefore preempted tasks are always
115          * RUNNING (we will not have dequeued if state != RUNNING).
116          */
117         return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
118 }
119 #endif /* CREATE_TRACE_POINTS */
120
121 /*
122  * Tracepoint for task switches, performed by the scheduler:
123  */
124 TRACE_EVENT(sched_switch,
125
126         TP_PROTO(bool preempt,
127                  struct task_struct *prev,
128                  struct task_struct *next),
129
130         TP_ARGS(preempt, prev, next),
131
132         TP_STRUCT__entry(
133                 __array(        char,   prev_comm,      TASK_COMM_LEN   )
134                 __field(        pid_t,  prev_pid                        )
135                 __field(        int,    prev_prio                       )
136                 __field(        long,   prev_state                      )
137                 __array(        char,   next_comm,      TASK_COMM_LEN   )
138                 __field(        pid_t,  next_pid                        )
139                 __field(        int,    next_prio                       )
140         ),
141
142         TP_fast_assign(
143                 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
144                 __entry->prev_pid       = prev->pid;
145                 __entry->prev_prio      = prev->prio;
                /* TASK_STATE_MAX bit flags a preemption (see helper above) */
146                 __entry->prev_state     = __trace_sched_switch_state(preempt, prev);
147                 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
148                 __entry->next_pid       = next->pid;
149                 __entry->next_prio      = next->prio;
150         ),
151
        /*
         * Decode prev_state into the classic S/D/T/t/Z/X/... letters; a task
         * with no state bits set prints as "R" (running), and the
         * TASK_STATE_MAX bit appends "+" for "preempted while runnable".
         */
152         TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
153                 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
154                 __entry->prev_state & (TASK_STATE_MAX-1) ?
155                   __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
156                                 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
157                                 { 16, "Z" }, { 32, "X" }, { 64, "x" },
158                                 { 128, "K" }, { 256, "W" }, { 512, "P" },
159                                 { 1024, "N" }) : "R",
160                 __entry->prev_state & TASK_STATE_MAX ? "+" : "",
161                 __entry->next_comm, __entry->next_pid, __entry->next_prio)
162 );
163
164 /*
165  * Tracepoint for a task being migrated:
166  */
167 TRACE_EVENT(sched_migrate_task,
168
169         TP_PROTO(struct task_struct *p, int dest_cpu),
170
171         TP_ARGS(p, dest_cpu),
172
173         TP_STRUCT__entry(
174                 __array(        char,   comm,   TASK_COMM_LEN   )
175                 __field(        pid_t,  pid                     )
176                 __field(        int,    prio                    )
177                 __field(        int,    orig_cpu                )
178                 __field(        int,    dest_cpu                )
179         ),
180
181         TP_fast_assign(
182                 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
183                 __entry->pid            = p->pid;
184                 __entry->prio           = p->prio;
                /* the task's CPU as seen at trace time is the origin */
185                 __entry->orig_cpu       = task_cpu(p);
186                 __entry->dest_cpu       = dest_cpu;
187         ),
188
189         TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
190                   __entry->comm, __entry->pid, __entry->prio,
191                   __entry->orig_cpu, __entry->dest_cpu)
192 );
193
/* Common comm/pid/prio record shared by the process-lifetime events below. */
194 DECLARE_EVENT_CLASS(sched_process_template,
195
196         TP_PROTO(struct task_struct *p),
197
198         TP_ARGS(p),
199
200         TP_STRUCT__entry(
201                 __array(        char,   comm,   TASK_COMM_LEN   )
202                 __field(        pid_t,  pid                     )
203                 __field(        int,    prio                    )
204         ),
205
206         TP_fast_assign(
207                 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
208                 __entry->pid            = p->pid;
209                 __entry->prio           = p->prio;
210         ),
211
212         TP_printk("comm=%s pid=%d prio=%d",
213                   __entry->comm, __entry->pid, __entry->prio)
214 );
215
216 /*
217  * Tracepoint for freeing a task:
218  * (records the comm/pid/prio fields of sched_process_template)
219  */
219 DEFINE_EVENT(sched_process_template, sched_process_free,
220              TP_PROTO(struct task_struct *p),
221              TP_ARGS(p));
222
223
224 /*
225  * Tracepoint for a task exiting:
226  * (records the comm/pid/prio fields of sched_process_template)
227  */
227 DEFINE_EVENT(sched_process_template, sched_process_exit,
228              TP_PROTO(struct task_struct *p),
229              TP_ARGS(p));
230
231 /*
232  * Tracepoint for waiting on task to unschedule:
233  * (records the comm/pid/prio fields of sched_process_template)
234  */
234 DEFINE_EVENT(sched_process_template, sched_wait_task,
235         TP_PROTO(struct task_struct *p),
236         TP_ARGS(p));
237
238 /*
239  * Tracepoint for a waiting task:
240  */
241 TRACE_EVENT(sched_process_wait,
242
243         TP_PROTO(struct pid *pid),
244
245         TP_ARGS(pid),
246
247         TP_STRUCT__entry(
248                 __array(        char,   comm,   TASK_COMM_LEN   )
249                 __field(        pid_t,  pid                     )
250                 __field(        int,    prio                    )
251         ),
252
253         TP_fast_assign(
                /*
                 * comm/prio describe the waiting task (current); pid is the
                 * numeric id of the pid being waited on.
                 */
254                 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
255                 __entry->pid            = pid_nr(pid);
256                 __entry->prio           = current->prio;
257         ),
258
259         TP_printk("comm=%s pid=%d prio=%d",
260                   __entry->comm, __entry->pid, __entry->prio)
261 );
262
263 /*
264  * Tracepoint for do_fork:
265  */
266 TRACE_EVENT(sched_process_fork,
267
268         TP_PROTO(struct task_struct *parent, struct task_struct *child),
269
270         TP_ARGS(parent, child),
271
272         TP_STRUCT__entry(
273                 __array(        char,   parent_comm,    TASK_COMM_LEN   )
274                 __field(        pid_t,  parent_pid                      )
275                 __array(        char,   child_comm,     TASK_COMM_LEN   )
276                 __field(        pid_t,  child_pid                       )
277         ),
278
279         TP_fast_assign(
280                 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
281                 __entry->parent_pid     = parent->pid;
282                 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
283                 __entry->child_pid      = child->pid;
284         ),
285
        /* note: the parent's fields print under the plain comm=/pid= keys */
286         TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
287                 __entry->parent_comm, __entry->parent_pid,
288                 __entry->child_comm, __entry->child_pid)
289 );
290
291 /*
292  * Tracepoint for exec:
293  */
294 TRACE_EVENT(sched_process_exec,
295
296         TP_PROTO(struct task_struct *p, pid_t old_pid,
297                  struct linux_binprm *bprm),
298
299         TP_ARGS(p, old_pid, bprm),
300
301         TP_STRUCT__entry(
                /* variable-length copy of the exec'd image's filename */
302                 __string(       filename,       bprm->filename  )
303                 __field(        pid_t,          pid             )
304                 __field(        pid_t,          old_pid         )
305         ),
306
307         TP_fast_assign(
308                 __assign_str(filename, bprm->filename);
309                 __entry->pid            = p->pid;
                /* old_pid: the caller-supplied pid from before the exec */
310                 __entry->old_pid        = old_pid;
311         ),
312
313         TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
314                   __entry->pid, __entry->old_pid)
315 );
316
317 /*
318  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
319  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
320  */
/* Common comm/pid/delay record shared by the sched_stat_* events below. */
321 DECLARE_EVENT_CLASS(sched_stat_template,
322
323         TP_PROTO(struct task_struct *tsk, u64 delay),
324
        /* NOTE: __perf_task/__perf_count are perf hints (task of interest,
         * sample weight) — confirm against the tracepoint infrastructure */
325         TP_ARGS(__perf_task(tsk), __perf_count(delay)),
326
327         TP_STRUCT__entry(
328                 __array( char,  comm,   TASK_COMM_LEN   )
329                 __field( pid_t, pid                     )
330                 __field( u64,   delay                   )
331         ),
332
333         TP_fast_assign(
334                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
335                 __entry->pid    = tsk->pid;
336                 __entry->delay  = delay;
337         ),
338
339         TP_printk("comm=%s pid=%d delay=%Lu [ns]",
340                         __entry->comm, __entry->pid,
341                         (unsigned long long)__entry->delay)
342 );
343
344
345 /*
346  * Tracepoint for accounting wait time (time the task is runnable
347  * but not actually running due to scheduler contention).
348  * The delay is reported in nanoseconds.
349  */
349 DEFINE_EVENT(sched_stat_template, sched_stat_wait,
350              TP_PROTO(struct task_struct *tsk, u64 delay),
351              TP_ARGS(tsk, delay));
352
353 /*
354  * Tracepoint for accounting sleep time (time the task is not runnable,
355  * including iowait, see below).
356  * The delay is reported in nanoseconds.
357  */
357 DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
358              TP_PROTO(struct task_struct *tsk, u64 delay),
359              TP_ARGS(tsk, delay));
360
361 /*
362  * Tracepoint for accounting iowait time (time the task is not runnable
363  * due to waiting on IO to complete).
364  * The delay is reported in nanoseconds.
365  */
365 DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
366              TP_PROTO(struct task_struct *tsk, u64 delay),
367              TP_ARGS(tsk, delay));
368
369 /*
370  * Tracepoint for accounting blocked time (time the task is in uninterruptible).
371  * The delay is reported in nanoseconds.
372  */
372 DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
373              TP_PROTO(struct task_struct *tsk, u64 delay),
374              TP_ARGS(tsk, delay));
375
376 /*
377  * Tracepoint for recording the cause of uninterruptible sleep.
378  */
379 TRACE_EVENT(sched_blocked_reason,
380
381         TP_PROTO(struct task_struct *tsk),
382
383         TP_ARGS(tsk),
384
385         TP_STRUCT__entry(
386                 __field( pid_t, pid     )
387                 __field( void*, caller  )
388                 __field( bool, io_wait  )
389         ),
390
391         TP_fast_assign(
392                 __entry->pid    = tsk->pid;
                /* wchan: the address where the task is blocked */
393                 __entry->caller = (void*)get_wchan(tsk);
394                 __entry->io_wait = tsk->in_iowait;
395         ),
396
        /* %pS prints the blocking address symbolically */
397         TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
398 );
399
400 /*
401  * Tracepoint for accounting runtime (time the task is executing
402  * on a CPU).
403  */
404 DECLARE_EVENT_CLASS(sched_stat_runtime,
405
406         TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
407
        /* NOTE: __perf_count(runtime) makes runtime the perf sample weight —
         * confirm against the tracepoint infrastructure */
408         TP_ARGS(tsk, __perf_count(runtime), vruntime),
409
410         TP_STRUCT__entry(
411                 __array( char,  comm,   TASK_COMM_LEN   )
412                 __field( pid_t, pid                     )
413                 __field( u64,   runtime                 )
414                 __field( u64,   vruntime                        )
415         ),
416
417         TP_fast_assign(
418                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
419                 __entry->pid            = tsk->pid;
420                 __entry->runtime        = runtime;
421                 __entry->vruntime       = vruntime;
422         ),
423
424         TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
425                         __entry->comm, __entry->pid,
426                         (unsigned long long)__entry->runtime,
427                         (unsigned long long)__entry->vruntime)
428 );
429
/* Instantiate the class above as the sched_stat_runtime event proper. */
430 DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
431              TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
432              TP_ARGS(tsk, runtime, vruntime));
433
434 /*
435  * Tracepoint for showing priority inheritance modifying a tasks
436  * priority.
437  */
438 TRACE_EVENT(sched_pi_setprio,
439
440         TP_PROTO(struct task_struct *tsk, int newprio),
441
442         TP_ARGS(tsk, newprio),
443
444         TP_STRUCT__entry(
445                 __array( char,  comm,   TASK_COMM_LEN   )
446                 __field( pid_t, pid                     )
447                 __field( int,   oldprio                 )
448                 __field( int,   newprio                 )
449         ),
450
451         TP_fast_assign(
452                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
453                 __entry->pid            = tsk->pid;
                /* oldprio is read from the task, so the tracepoint must fire
                 * before tsk->prio is updated — verify at the call site */
454                 __entry->oldprio        = tsk->prio;
455                 __entry->newprio        = newprio;
456         ),
457
458         TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
459                         __entry->comm, __entry->pid,
460                         __entry->oldprio, __entry->newprio)
461 );
462
463 #ifdef CONFIG_DETECT_HUNG_TASK
/* Emitted by the hung task watchdog (CONFIG_DETECT_HUNG_TASK). */
464 TRACE_EVENT(sched_process_hang,
465         TP_PROTO(struct task_struct *tsk),
466         TP_ARGS(tsk),
467
468         TP_STRUCT__entry(
469                 __array( char,  comm,   TASK_COMM_LEN   )
470                 __field( pid_t, pid                     )
471         ),
472
473         TP_fast_assign(
474                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
475                 __entry->pid = tsk->pid;
476         ),
477
478         TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
479 );
480 #endif /* CONFIG_DETECT_HUNG_TASK */
481
/* Common record for NUMA task-movement events: task identity plus the
 * source/destination CPUs and their NUMA nodes. */
482 DECLARE_EVENT_CLASS(sched_move_task_template,
483
484         TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
485
486         TP_ARGS(tsk, src_cpu, dst_cpu),
487
488         TP_STRUCT__entry(
489                 __field( pid_t, pid                     )
490                 __field( pid_t, tgid                    )
491                 __field( pid_t, ngid                    )
492                 __field( int,   src_cpu                 )
493                 __field( int,   src_nid                 )
494                 __field( int,   dst_cpu                 )
495                 __field( int,   dst_nid                 )
496         ),
497
498         TP_fast_assign(
499                 __entry->pid            = task_pid_nr(tsk);
500                 __entry->tgid           = task_tgid_nr(tsk);
                /* ngid: the task's NUMA group id */
501                 __entry->ngid           = task_numa_group_id(tsk);
502                 __entry->src_cpu        = src_cpu;
                /* node ids are derived from the CPUs via cpu_to_node() */
503                 __entry->src_nid        = cpu_to_node(src_cpu);
504                 __entry->dst_cpu        = dst_cpu;
505                 __entry->dst_nid        = cpu_to_node(dst_cpu);
506         ),
507
508         TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
509                         __entry->pid, __entry->tgid, __entry->ngid,
510                         __entry->src_cpu, __entry->src_nid,
511                         __entry->dst_cpu, __entry->dst_nid)
512 );
513
514 /*
515  * Tracks migration of tasks from one runqueue to another. Can be used to
516  * detect if automatic NUMA balancing is bouncing between nodes.
517  */
518 DEFINE_EVENT(sched_move_task_template, sched_move_numa,
519         TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
520
521         TP_ARGS(tsk, src_cpu, dst_cpu)
522 );
523
/* Same record as sched_move_numa; presumably fired when the task is kept
 * ("stuck") rather than moved — verify against the NUMA balancing callers. */
524 DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
525         TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
526
527         TP_ARGS(tsk, src_cpu, dst_cpu)
528 );
529
/* Records both sides of a NUMA task swap: identity, CPU and node for the
 * source and destination tasks. */
530 TRACE_EVENT(sched_swap_numa,
531
532         TP_PROTO(struct task_struct *src_tsk, int src_cpu,
533                  struct task_struct *dst_tsk, int dst_cpu),
534
535         TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
536
537         TP_STRUCT__entry(
538                 __field( pid_t, src_pid                 )
539                 __field( pid_t, src_tgid                )
540                 __field( pid_t, src_ngid                )
541                 __field( int,   src_cpu                 )
542                 __field( int,   src_nid                 )
543                 __field( pid_t, dst_pid                 )
544                 __field( pid_t, dst_tgid                )
545                 __field( pid_t, dst_ngid                )
546                 __field( int,   dst_cpu                 )
547                 __field( int,   dst_nid                 )
548         ),
549
550         TP_fast_assign(
551                 __entry->src_pid        = task_pid_nr(src_tsk);
552                 __entry->src_tgid       = task_tgid_nr(src_tsk);
553                 __entry->src_ngid       = task_numa_group_id(src_tsk);
554                 __entry->src_cpu        = src_cpu;
                /* node ids derived from the CPUs via cpu_to_node() */
555                 __entry->src_nid        = cpu_to_node(src_cpu);
556                 __entry->dst_pid        = task_pid_nr(dst_tsk);
557                 __entry->dst_tgid       = task_tgid_nr(dst_tsk);
558                 __entry->dst_ngid       = task_numa_group_id(dst_tsk);
559                 __entry->dst_cpu        = dst_cpu;
560                 __entry->dst_nid        = cpu_to_node(dst_cpu);
561         ),
562
563         TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
564                         __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
565                         __entry->src_cpu, __entry->src_nid,
566                         __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
567                         __entry->dst_cpu, __entry->dst_nid)
568 );
569
570 /*
571  * Tracepoint for waking a polling cpu without an IPI.
572  */
573 TRACE_EVENT(sched_wake_idle_without_ipi,
574
575         TP_PROTO(int cpu),
576
577         TP_ARGS(cpu),
578
579         TP_STRUCT__entry(
                /* the polling CPU that was woken without sending an IPI */
580                 __field(        int,    cpu     )
581         ),
582
583         TP_fast_assign(
584                 __entry->cpu    = cpu;
585         ),
586
587         TP_printk("cpu=%d", __entry->cpu)
588 );
589 #endif /* _TRACE_SCHED_H */
590
591 /* This part must be outside protection */
592 #include <trace/define_trace.h>