ARM64: sched: cpufreq_sched: fix bug: init data before using it in thread
[firefly-linux-kernel-4.4.55.git] / kernel / sched / cpufreq_sched.c
index 58bca8d2ca653e487e8a25bef4846834133d6bfa..48053393dc722a3932a4c6435e4bf2a4e9ca1e68 100644 (file)
@@ -14,6 +14,9 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_sched.h>
+
 #include "sched.h"
 
 #define THROTTLE_NSEC          50000000 /* 50ms default */
@@ -78,6 +81,7 @@ static bool finish_last_request(struct gov_data *gd)
                int usec_left = ktime_to_ns(ktime_sub(gd->throttle, now));
 
                usec_left /= NSEC_PER_USEC;
+               trace_cpufreq_sched_throttled(usec_left);
                usleep_range(usec_left, usec_left + 100);
                now = ktime_get();
                if (ktime_after(now, gd->throttle))
@@ -115,9 +119,9 @@ static int cpufreq_sched_thread(void *data)
        }
 
        do {
-               set_current_state(TASK_INTERRUPTIBLE);
                new_request = gd->requested_freq;
                if (new_request == last_request) {
+                       set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                } else {
                        /*
@@ -186,6 +190,9 @@ static void update_fdomain_capacity_request(int cpu)
                goto out;
        freq_new = policy->freq_table[index_new].frequency;
 
+       trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
+                                       gd->requested_freq);
+
        if (freq_new == gd->requested_freq)
                goto out;
 
@@ -222,6 +229,8 @@ void update_cpu_capacity_request(int cpu, bool request)
        if (new_capacity == scr->total)
                return;
 
+       trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity);
+
        scr->total = new_capacity;
        if (request)
                update_fdomain_capacity_request(cpu);
@@ -256,6 +265,8 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
        pr_debug("%s: throttle threshold = %u [ns]\n",
                  __func__, gd->throttle_nsec);
 
+       policy->governor_data = gd;
+
        if (cpufreq_driver_is_slow()) {
                cpufreq_driver_slow = true;
                gd->task = kthread_create(cpufreq_sched_thread, policy,
@@ -272,7 +283,6 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
                init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
        }
 
-       policy->governor_data = gd;
        set_sched_freq();
 
        return 0;
@@ -318,6 +328,20 @@ static int cpufreq_sched_stop(struct cpufreq_policy *policy)
        return 0;
 }
 
+static int cpufreq_sched_limits(struct cpufreq_policy *policy)
+{
+       if (policy->max < policy->cur)
+               __cpufreq_driver_target(policy,
+                                       policy->max,
+                                       CPUFREQ_RELATION_H);
+       else if (policy->min > policy->cur)
+               __cpufreq_driver_target(policy,
+                                       policy->min,
+                                       CPUFREQ_RELATION_L);
+
+       return 0;
+}
+
 static int cpufreq_sched_setup(struct cpufreq_policy *policy,
                               unsigned int event)
 {
@@ -331,7 +355,7 @@ static int cpufreq_sched_setup(struct cpufreq_policy *policy,
        case CPUFREQ_GOV_STOP:
                return cpufreq_sched_stop(policy);
        case CPUFREQ_GOV_LIMITS:
-               break;
+               return cpufreq_sched_limits(policy);
        }
        return 0;
 }