DEBUG: sched: add tracepoint for task load/util signals
author Juri Lelli <juri.lelli@arm.com>
Mon, 9 Nov 2015 12:07:27 +0000 (12:07 +0000)
committer Punit Agrawal <punit.agrawal@arm.com>
Mon, 21 Mar 2016 14:57:42 +0000 (14:57 +0000)
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
include/trace/events/sched.h
kernel/sched/fair.c

index 20a7216f8abbeabf5850c48c67d072d2b2098e65..99f3e648c197e5e528fe33d48aa12fee03a4b254 100644 (file)
@@ -586,6 +586,49 @@ TRACE_EVENT(sched_contrib_scale_f,
                  __entry->cpu, __entry->freq_scale_factor,
                  __entry->cpu_scale_factor)
 );
+
+/*
+ * Tracepoint for a task's per-entity load/utilization averages (sched_avg).
+ */
+TRACE_EVENT(sched_load_avg_task,
+
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+       TP_ARGS(tsk, avg),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN           )
+               __field( pid_t, pid                             )
+               __field( int,   cpu                             )
+               __field( unsigned long, load_avg                )
+               __field( unsigned long, util_avg                )
+               __field( u64,           load_sum                )
+               __field( u32,           util_sum                )
+               __field( u32,           period_contrib          )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->load_avg               = avg->load_avg;
+               __entry->util_avg               = avg->util_avg;
+               __entry->load_sum               = avg->load_sum;
+               __entry->util_sum               = avg->util_sum;
+               __entry->period_contrib         = avg->period_contrib;
+       ),
+
+       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
+                 " util_sum=%u period_contrib=%u",
+                 __entry->comm,
+                 __entry->pid,
+                 __entry->cpu,
+                 __entry->load_avg,
+                 __entry->util_avg,
+                 (u64)__entry->load_sum,
+                 (u32)__entry->util_sum,
+                 (u32)__entry->period_contrib)
+);
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
index ec2e8aecc4f36c9c7501b2d1c7fea7b41238ea97..8cdb4e5592f9de63247151bf5b40253d15d89679 100644 (file)
@@ -2731,6 +2731,9 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 
        if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
                update_tg_load_avg(cfs_rq, 0);
+
+       if (entity_is_task(se))
+               trace_sched_load_avg_task(task_of(se), &se->avg);
 }
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)