#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);
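/*
 * Illustrative only (task name and numbers are made up): with the fields
 * assigned above, a wakeup event renders in the trace buffer roughly as
 *
 *	comm=kworker/0:1 pid=42 prio=120 target_cpu=000
 *
 * Note that "success" is still recorded but no longer printed; per the
 * comment above it is kept only so the record layout stays compatible.
 */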
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */
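/*
 * Sketch of the encoding above: TASK_STATE_MAX is one bit above the highest
 * real task-state bit, so it can be borrowed as a "was preempted" marker
 * without clobbering the state itself. A preempted task thus reports
 * TASK_RUNNING | TASK_STATE_MAX, which sched_switch below prints as "R+",
 * while a task that blocked in TASK_UNINTERRUPTIBLE prints as "D".
 */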
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
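/*
 * The single-letter flags above follow the conventional task-state letters:
 * 1=S (interruptible sleep), 2=D (uninterruptible), 4=T (stopped),
 * 8=t (traced), 16=Z (zombie), 32=X (dead), 64=x (task dead),
 * 128=K (wakekill), 256=W (waking), 512=P (parked), 1024=N (noload).
 * A state of 0 prints as "R" (running), and the trailing "+" marks a
 * preempted task, e.g. "prev_state=R+".
 */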
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
/*
 * Tracepoint for a CPU going offline/online:
 */
TRACE_EVENT(sched_cpu_hotplug,

	TP_PROTO(int affected_cpu, int error, int status),

	TP_ARGS(affected_cpu, error, status),

	TP_STRUCT__entry(
		__field(	int,	affected_cpu	)
		__field(	int,	error		)
		__field(	int,	status		)
	),

	TP_fast_assign(
		__entry->affected_cpu	= affected_cpu;
		__entry->error		= error;
		__entry->status		= status;
	),

	TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
		  __entry->status ? "online" : "offline", __entry->error)
);
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
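/*
 * Note on the fields above: comm and prio describe the waiting task
 * (current), while pid is the pid being waited on (pid_nr(pid)). So a
 * shell waiting on a child logs its own comm with the child's pid.
 */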
/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);
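/*
 * The sched_stat_* events below all reuse this class via DEFINE_EVENT, so
 * they share the record layout and format string; only the event name
 * differs. Illustrative output (made-up numbers):
 *
 *	sched_stat_wait: comm=top pid=123 delay=102563 [ns]
 */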
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t,	pid	)
		__field( void*,	caller	)
		__field( bool,	io_wait	)
	),

	TP_fast_assign(
		__entry->pid	= tsk->pid;
		__entry->caller	= (void *)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);
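/*
 * caller comes from get_wchan(), i.e. the function in which the task is
 * blocked, and %pS prints it symbolically. An illustrative line:
 *
 *	pid=123 iowait=1 caller=io_schedule+0x18/0x40
 */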
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
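/*
 * Reminder for readers of this event: kernel prio values are inverted
 * (lower number = higher priority), with 0-99 reserved for realtime and a
 * nice-0 CFS task at prio=120. A PI boost therefore shows up as a drop,
 * e.g. oldprio=120 newprio=98 (illustrative values).
 */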
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);
/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);
TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= task_pid_nr(dst_tsk);
		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);
/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);
#ifdef CONFIG_SMP
TRACE_EVENT(sched_contrib_scale_f,

	TP_PROTO(int cpu, unsigned long freq_scale_factor,
		 unsigned long cpu_scale_factor),

	TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(unsigned long, freq_scale_factor)
		__field(unsigned long, cpu_scale_factor)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->freq_scale_factor = freq_scale_factor;
		__entry->cpu_scale_factor = cpu_scale_factor;
	),

	TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
		  __entry->cpu, __entry->freq_scale_factor,
		  __entry->cpu_scale_factor)
);
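/*
 * Assumption worth stating (not derivable from this event alone): both
 * scale factors are conventionally fixed-point values against
 * SCHED_CAPACITY_SCALE (1024), so e.g. freq_scale_factor=512 would mean
 * the cpu is running at roughly half its maximum frequency.
 */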
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int walt_ravg_window;
extern unsigned int walt_disabled;
#endif /* CONFIG_SCHED_WALT */
/*
 * Tracepoint for accounting sched averages for tasks.
 */
TRACE_EVENT(sched_load_avg_task,

	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, void *_ravg),

	TP_ARGS(tsk, avg, _ravg),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN		)
		__field( pid_t,	pid				)
		__field( int,	cpu				)
		__field( unsigned long,	load_avg		)
		__field( unsigned long,	util_avg		)
		__field( unsigned long,	util_avg_pelt		)
		__field( unsigned long,	util_avg_walt		)
		__field( u64,		load_sum		)
		__field( u32,		util_sum		)
		__field( u32,		period_contrib		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid			= tsk->pid;
		__entry->cpu			= task_cpu(tsk);
		__entry->load_avg		= avg->load_avg;
		__entry->util_avg		= avg->util_avg;
		__entry->load_sum		= avg->load_sum;
		__entry->util_sum		= avg->util_sum;
		__entry->period_contrib		= avg->period_contrib;
		__entry->util_avg_pelt		= avg->util_avg;
		__entry->util_avg_walt		= 0;
#ifdef CONFIG_SCHED_WALT
		__entry->util_avg_walt = (((unsigned long)((struct ravg *)_ravg)->demand) << SCHED_LOAD_SHIFT);
		do_div(__entry->util_avg_walt, walt_ravg_window);
		if (!walt_disabled && sysctl_sched_use_walt_task_util)
			__entry->util_avg = __entry->util_avg_walt;
#endif
	),

	TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
			"util_avg_pelt=%lu util_avg_walt=%lu load_sum=%llu"
			" util_sum=%u period_contrib=%u",
		  __entry->comm,
		  __entry->pid,
		  __entry->cpu,
		  __entry->load_avg,
		  __entry->util_avg,
		  __entry->util_avg_pelt,
		  __entry->util_avg_walt,
		  (u64)__entry->load_sum,
		  (u32)__entry->util_sum,
		  (u32)__entry->period_contrib)
);
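/*
 * Worked example of the WALT branch above (made-up numbers, assuming
 * SCHED_LOAD_SHIFT=10): with demand=10000000 ns and
 * walt_ravg_window=20000000 ns,
 *
 *	util_avg_walt = (10000000 << 10) / 20000000 = 512,
 *
 * i.e. the task consumed half a window, expressed on the same 0..1024
 * scale as the PELT util_avg it can replace.
 */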
/*
 * Tracepoint for accounting sched averages for cpus.
 */
TRACE_EVENT(sched_load_avg_cpu,

	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),

	TP_ARGS(cpu, cfs_rq),

	TP_STRUCT__entry(
		__field( int,	cpu				)
		__field( unsigned long,	load_avg		)
		__field( unsigned long,	util_avg		)
		__field( unsigned long,	util_avg_pelt		)
		__field( unsigned long,	util_avg_walt		)
	),

	TP_fast_assign(
		__entry->cpu			= cpu;
		__entry->load_avg		= cfs_rq->avg.load_avg;
		__entry->util_avg		= cfs_rq->avg.util_avg;
		__entry->util_avg_pelt		= cfs_rq->avg.util_avg;
		__entry->util_avg_walt		= 0;
#ifdef CONFIG_SCHED_WALT
		__entry->util_avg_walt =
				cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
		do_div(__entry->util_avg_walt, walt_ravg_window);
		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
			__entry->util_avg = __entry->util_avg_walt;
#endif
	),

	TP_printk("cpu=%d load_avg=%lu util_avg=%lu "
			"util_avg_pelt=%lu util_avg_walt=%lu",
		  __entry->cpu, __entry->load_avg, __entry->util_avg,
		  __entry->util_avg_pelt, __entry->util_avg_walt)
);
/*
 * Tracepoint for sched_tune_config settings
 */
TRACE_EVENT(sched_tune_config,

	TP_PROTO(int boost),

	TP_ARGS(boost),

	TP_STRUCT__entry(
		__field( int,	boost	)
	),

	TP_fast_assign(
		__entry->boost	= boost;
	),

	TP_printk("boost=%d ", __entry->boost)
);
/*
 * Tracepoint for accounting CPU boosted utilization
 */
TRACE_EVENT(sched_boost_cpu,

	TP_PROTO(int cpu, unsigned long util, long margin),

	TP_ARGS(cpu, util, margin),

	TP_STRUCT__entry(
		__field( int,		cpu	)
		__field( unsigned long,	util	)
		__field( long,		margin	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
		__entry->util	= util;
		__entry->margin	= margin;
	),

	TP_printk("cpu=%d util=%lu margin=%ld",
		  __entry->cpu,
		  __entry->util,
		  __entry->margin)
);
/*
 * Tracepoint for schedtune_tasks_update
 */
TRACE_EVENT(sched_tune_tasks_update,

	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
		int boost, int max_boost),

	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	cpu			)
		__field( int,	tasks			)
		__field( int,	idx			)
		__field( int,	boost			)
		__field( int,	max_boost		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->cpu		= cpu;
		__entry->tasks		= tasks;
		__entry->idx		= idx;
		__entry->boost		= boost;
		__entry->max_boost	= max_boost;
	),

	TP_printk("pid=%d comm=%s "
			"cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
		__entry->pid, __entry->comm,
		__entry->cpu, __entry->tasks, __entry->idx,
		__entry->boost, __entry->max_boost)
);
/*
 * Tracepoint for schedtune_boostgroup_update
 */
TRACE_EVENT(sched_tune_boostgroup_update,

	TP_PROTO(int cpu, int variation, int max_boost),

	TP_ARGS(cpu, variation, max_boost),

	TP_STRUCT__entry(
		__field( int,	cpu		)
		__field( int,	variation	)
		__field( int,	max_boost	)
	),

	TP_fast_assign(
		__entry->cpu		= cpu;
		__entry->variation	= variation;
		__entry->max_boost	= max_boost;
	),

	TP_printk("cpu=%d variation=%d max_boost=%d",
		__entry->cpu, __entry->variation, __entry->max_boost)
);
/*
 * Tracepoint for accounting task boosted utilization
 */
TRACE_EVENT(sched_boost_task,

	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),

	TP_ARGS(tsk, util, margin),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,		pid	)
		__field( unsigned long,	util	)
		__field( long,		margin	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->util	= util;
		__entry->margin	= margin;
	),

	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
		  __entry->comm, __entry->pid,
		  __entry->util,
		  __entry->margin)
);
/*
 * Tracepoint for accounting sched group energy
 */
TRACE_EVENT(sched_energy_diff,

	TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
		int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
		int nrgn, int nrgp),

	TP_ARGS(tsk, scpu, dcpu, udelta,
		nrgb, nrga, nrgd, capb, capa, capd,
		nrgn, nrgp),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid	)
		__field( int,	scpu	)
		__field( int,	dcpu	)
		__field( int,	udelta	)
		__field( int,	nrgb	)
		__field( int,	nrga	)
		__field( int,	nrgd	)
		__field( int,	capb	)
		__field( int,	capa	)
		__field( int,	capd	)
		__field( int,	nrgn	)
		__field( int,	nrgp	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->scpu		= scpu;
		__entry->dcpu		= dcpu;
		__entry->udelta		= udelta;
		__entry->nrgb		= nrgb;
		__entry->nrga		= nrga;
		__entry->nrgd		= nrgd;
		__entry->capb		= capb;
		__entry->capa		= capa;
		__entry->capd		= capd;
		__entry->nrgn		= nrgn;
		__entry->nrgp		= nrgp;
	),

	TP_printk("pid=%d comm=%s "
			"src_cpu=%d dst_cpu=%d usage_delta=%d "
			"nrg_before=%d nrg_after=%d nrg_diff=%d "
			"cap_before=%d cap_after=%d cap_delta=%d "
			"nrg_delta=%d nrg_payoff=%d",
		__entry->pid, __entry->comm,
		__entry->scpu, __entry->dcpu, __entry->udelta,
		__entry->nrgb, __entry->nrga, __entry->nrgd,
		__entry->capb, __entry->capa, __entry->capd,
		__entry->nrgn, __entry->nrgp)
);
/*
 * Tracepoint for sched_tune_filter
 */
TRACE_EVENT(sched_tune_filter,

	TP_PROTO(int nrg_delta, int cap_delta,
		 int nrg_gain, int cap_gain,
		 int payoff, int region),

	TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),

	TP_STRUCT__entry(
		__field( int,	nrg_delta	)
		__field( int,	cap_delta	)
		__field( int,	nrg_gain	)
		__field( int,	cap_gain	)
		__field( int,	payoff		)
		__field( int,	region		)
	),

	TP_fast_assign(
		__entry->nrg_delta	= nrg_delta;
		__entry->cap_delta	= cap_delta;
		__entry->nrg_gain	= nrg_gain;
		__entry->cap_gain	= cap_gain;
		__entry->payoff		= payoff;
		__entry->region		= region;
	),

	TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
		__entry->nrg_delta, __entry->cap_delta,
		__entry->nrg_gain, __entry->cap_gain,
		__entry->payoff, __entry->region)
);
/*
 * Tracepoint for system overutilized flag
 */
TRACE_EVENT(sched_overutilized,

	TP_PROTO(bool overutilized),

	TP_ARGS(overutilized),

	TP_STRUCT__entry(
		__field( bool,	overutilized	)
	),

	TP_fast_assign(
		__entry->overutilized	= overutilized;
	),

	TP_printk("overutilized=%d",
		__entry->overutilized ? 1 : 0)
);
#ifdef CONFIG_SCHED_WALT
struct rq;

TRACE_EVENT(walt_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
		 u64 wallclock, u64 irqtime),

	TP_ARGS(p, rq, evt, wallclock, irqtime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( pid_t,	cur_pid			)
		__field(unsigned int,	cur_freq	)
		__field( u64,	wallclock		)
		__field( u64,	mark_start		)
		__field( u64,	delta_m			)
		__field( u64,	win_start		)
		__field( u64,	delta			)
		__field( u64,	irqtime			)
		__field( int,	evt			)
		__field(unsigned int,	demand		)
		__field(unsigned int,	sum		)
		__field( int,	cpu			)
		__field( u64,	cs			)
		__field( u64,	ps			)
		__field(unsigned long,	util		)
		__field( u32,	curr_window		)
		__field( u32,	prev_window		)
		__field( u64,	nt_cs			)
		__field( u64,	nt_ps			)
		__field( u32,	active_windows		)
	),

	TP_fast_assign(
		__entry->wallclock	= wallclock;
		__entry->win_start	= rq->window_start;
		__entry->delta		= (wallclock - rq->window_start);
		__entry->evt		= evt;
		__entry->cpu		= rq->cpu;
		__entry->cur_pid	= rq->curr->pid;
		__entry->cur_freq	= rq->cur_freq;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->mark_start	= p->ravg.mark_start;
		__entry->delta_m	= (wallclock - p->ravg.mark_start);
		__entry->demand		= p->ravg.demand;
		__entry->sum		= p->ravg.sum;
		__entry->irqtime	= irqtime;
		__entry->cs		= rq->curr_runnable_sum;
		__entry->ps		= rq->prev_runnable_sum;
		__entry->util		= rq->prev_runnable_sum << SCHED_LOAD_SHIFT;
		do_div(__entry->util, walt_ravg_window);
		__entry->curr_window	= p->ravg.curr_window;
		__entry->prev_window	= p->ravg.prev_window;
		__entry->nt_cs		= rq->nt_curr_runnable_sum;
		__entry->nt_ps		= rq->nt_prev_runnable_sum;
		__entry->active_windows	= p->ravg.active_windows;
	),

	TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
		" cs %llu ps %llu util %lu cur_window %u prev_window %u active_wins %u",
		__entry->wallclock, __entry->win_start, __entry->delta,
		__entry->evt, __entry->cpu,
		__entry->cur_freq, __entry->cur_pid,
		__entry->pid, __entry->comm, __entry->mark_start,
		__entry->delta_m, __entry->demand,
		__entry->sum, __entry->irqtime,
		__entry->cs, __entry->ps, __entry->util,
		__entry->curr_window, __entry->prev_window,
		__entry->active_windows)
);
TRACE_EVENT(walt_update_history,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
		 int evt),

	TP_ARGS(rq, p, runtime, samples, evt),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field(unsigned int,	runtime		)
		__field( int,	samples			)
		__field( int,	evt			)
		__field( u64,	demand			)
		__field( u64,	walt_avg		)
		__field(unsigned int,	pelt_avg	)
		__array( u32,	hist, RAVG_HIST_SIZE_MAX)
		__field( int,	cpu			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->runtime	= runtime;
		__entry->samples	= samples;
		__entry->evt		= evt;
		__entry->demand		= p->ravg.demand;
		__entry->walt_avg	= (__entry->demand << 10);
		do_div(__entry->walt_avg, walt_ravg_window);
		__entry->pelt_avg	= p->se.avg.util_avg;
		memcpy(__entry->hist, p->ravg.sum_history,
					RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->cpu		= rq->cpu;
	),

	TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
		" walt %llu pelt %u (hist: %u %u %u %u %u) cpu %d",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->samples, __entry->evt,
		__entry->demand,
		__entry->walt_avg,
		__entry->pelt_avg,
		__entry->hist[0], __entry->hist[1],
		__entry->hist[2], __entry->hist[3],
		__entry->hist[4], __entry->cpu)
);
TRACE_EVENT(walt_migration_update_sum,

	TP_PROTO(struct rq *rq, struct task_struct *p),

	TP_ARGS(rq, p),

	TP_STRUCT__entry(
		__field( int,	cpu	)
		__field( int,	pid	)
		__field( u64,	cs	)
		__field( u64,	ps	)
		__field( s64,	nt_cs	)
		__field( s64,	nt_ps	)
	),

	TP_fast_assign(
		__entry->cpu		= cpu_of(rq);
		__entry->cs		= rq->curr_runnable_sum;
		__entry->ps		= rq->prev_runnable_sum;
		__entry->nt_cs		= (s64)rq->nt_curr_runnable_sum;
		__entry->nt_ps		= (s64)rq->nt_prev_runnable_sum;
		__entry->pid		= p->pid;
	),

	TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
		  __entry->cpu, __entry->cs, __entry->ps,
		  __entry->nt_cs, __entry->nt_ps, __entry->pid)
);
#endif /* CONFIG_SCHED_WALT */

#endif /* CONFIG_SMP */

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
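/*
 * Usage sketch (standard trace-header convention, not specific to this
 * file): exactly one compilation unit defines CREATE_TRACE_POINTS before
 * including this header so that <trace/define_trace.h> emits the tracepoint
 * definitions; every other user just includes the header:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sched.h>
 */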