DEBUG: sched,cpufreq: add cpu_capacity change tracepoint
authorJuri Lelli <juri.lelli@arm.com>
Thu, 30 Apr 2015 16:35:23 +0000 (17:35 +0100)
committerAmit Pundir <amit.pundir@linaro.org>
Wed, 14 Sep 2016 09:29:32 +0000 (14:59 +0530)
This is useful when we want to compare cpu utilization and
current cpu capacity side by side.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
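
For illustration only (this sketch is not part of the patch): the kind of
comparison the tracepoint makes visible, using the cpu_util() helper from
kernel/sched/sched.h and the capacity_curr_of() helper this patch moves into
kernel/sched/fair.c:

        /* Illustrative sketch, not part of this commit. */
        int cpu = smp_processor_id();
        bool saturated = cpu_util(cpu) >= capacity_curr_of(cpu);
        /* 'saturated': utilization has reached the capacity available
         * at the current operating frequency. */
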
drivers/cpufreq/cpufreq.c
include/linux/sched.h
include/trace/events/power.h
kernel/sched/fair.c
kernel/sched/sched.h

index 2b99bc3050407f2cab10d82bffbffc19d9ea42cd..7264820e6443a65fdbd2587a5ee555a134f81cc5 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
+#include <linux/sched.h>
 #include <trace/events/power.h>
 
 static LIST_HEAD(cpufreq_policy_list);
@@ -473,6 +474,7 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
 {
+       int cpu;
 
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
@@ -501,6 +503,8 @@ wait:
        spin_unlock(&policy->transition_lock);
 
        scale_freq_capacity(policy, freqs);
+       for_each_cpu(cpu, policy->cpus)
+               trace_cpu_capacity(capacity_curr_of(cpu), cpu);
 
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 }
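
Note: with this hunk, every frequency transition emits one cpu_capacity event
per CPU in policy->cpus, just before the PRECHANGE notification. Since
cpu_capacity reuses the existing "cpu" event class (see the power.h hunk
below), the capacity is reported in that class's state field; a trace line
would look roughly like:

        cpu_capacity: state=512 cpu_id=1
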
index c707c613664f1e0e2cb8f8ee5cec19d4c7316781..951422587dd91844b4a61d696d49394d9a603377 100644 (file)
@@ -1048,6 +1048,8 @@ struct sched_group_energy {
        struct capacity_state *cap_states; /* ptr to capacity state array */
 };
 
+unsigned long capacity_curr_of(int cpu);
+
 struct sched_group;
 
 struct sched_domain {
index 9af0d898016a611135753b4a14c04906bf13fc71..8924cc2b4ca8c45d867d00bbb522fa3eb37e74cd 100644 (file)
@@ -145,6 +145,13 @@ TRACE_EVENT(cpu_frequency_limits,
                  (unsigned long)__entry->cpu_id)
 );
 
+DEFINE_EVENT(cpu, cpu_capacity,
+
+       TP_PROTO(unsigned int capacity, unsigned int cpu_id),
+
+       TP_ARGS(capacity, cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
        TP_PROTO(struct device *dev, const char *pm_ops, int event),
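
For reference (not part of this patch): cpu_capacity piggybacks on the "cpu"
event class already defined earlier in include/trace/events/power.h, the same
class used by cpu_idle and cpu_frequency. That class, reproduced here
approximately from memory, is what fixes the state=/cpu_id= output format:

        DECLARE_EVENT_CLASS(cpu,

                TP_PROTO(unsigned int state, unsigned int cpu_id),

                TP_ARGS(state, cpu_id),

                TP_STRUCT__entry(
                        __field(u32, state)
                        __field(u32, cpu_id)
                ),

                TP_fast_assign(
                        __entry->state = state;
                        __entry->cpu_id = cpu_id;
                ),

                TP_printk("state=%lu cpu_id=%lu",
                          (unsigned long)__entry->state,
                          (unsigned long)__entry->cpu_id)
        );
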
index 7ca5003005d87cdd14318eabb84a9eece727f2a1..7d1302d85818cbc878f6bd601b26b4ada9161b88 100644 (file)
@@ -4719,6 +4719,17 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 
 #endif
 
+/*
+ * Returns the current capacity of cpu after applying both
+ * cpu and freq scaling.
+ */
+unsigned long capacity_curr_of(int cpu)
+{
+       return cpu_rq(cpu)->cpu_capacity_orig *
+              arch_scale_freq_capacity(NULL, cpu)
+              >> SCHED_CAPACITY_SHIFT;
+}
+
 static inline bool energy_aware(void)
 {
        return sched_feat(ENERGY_AWARE);
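
A worked example of capacity_curr_of() above, assuming SCHED_CAPACITY_SHIFT
is 10 (1024 scale): for a CPU with cpu_capacity_orig = 1024 running at half
of its maximum frequency, arch_scale_freq_capacity() returns roughly 512, so

        capacity_curr_of(cpu) = (1024 * 512) >> 10 = 512
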
index 3f52226bb6f383e19f9c43ac5948faad65708a51..a537f1864dd08bad9b94563eeac44912da276f09 100644 (file)
@@ -1512,17 +1512,6 @@ static inline unsigned long cpu_util(int cpu)
        return __cpu_util(cpu, 0);
 }
 
-/*
- * Returns the current capacity of cpu after applying both
- * cpu and freq scaling.
- */
-static inline unsigned long capacity_curr_of(int cpu)
-{
-       return cpu_rq(cpu)->cpu_capacity_orig *
-              arch_scale_freq_capacity(NULL, cpu)
-              >> SCHED_CAPACITY_SHIFT;
-}
-
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHED