cpufreq: interactive: fix show_target_loads and show_above_hispeed_delay
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_interactive.c
index 1b5d9301e2d7812b41e4b85ab535a9faa2e3ad14..a66748e13b38d9577f21078dd885a515f2712758 100644
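
Trim the trailing space that show_target_loads() and
show_above_hispeed_delay() emit before the newline by overwriting the
final separator instead of appending. The diff shown here also carries
related interactive-governor changes: a cpufreq_interactive_timer_start()
helper called with enable_sem write-held and the timers deactivated, an
underflow guard in update_load(), enable_sem coverage for sibling CPUs
in the frequency notifier, and target_freq clamping plus a safe timer
restart in the CPUFREQ_GOV_LIMITS path.
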
@@ -169,19 +169,47 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
 static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
 {
-       unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+       unsigned long expires;
        unsigned long flags;
 
+       spin_lock_irqsave(&pcpu->load_lock, flags);
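+       /*
+        * Reset the load-sampling window under load_lock so that
+        * update_load() never sees it half-initialized.
+        */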
+       pcpu->time_in_idle =
+               get_cpu_idle_time(smp_processor_id(),
+                                 &pcpu->time_in_idle_timestamp);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       expires = jiffies + usecs_to_jiffies(timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);
+
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }
 
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/*
+ * The caller must hold the enable_sem write semaphore to avoid timer
+ * races. cpu_timer and cpu_slack_timer must be deactivated before
+ * calling this function.
+ */
+static void cpufreq_interactive_timer_start(int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+       unsigned long flags;
+
+       pcpu->cpu_timer.expires = expires;
+       add_timer_on(&pcpu->cpu_timer, cpu);
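+       /*
+        * Arm the slack timer only while the CPU runs above policy->min,
+        * so an idle CPU is not left sitting at an elevated speed.
+        */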
+       if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(timer_slack_val);
+               pcpu->cpu_slack_timer.expires = expires;
+               add_timer_on(&pcpu->cpu_slack_timer, cpu);
+       }
+
        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
-               get_cpu_idle_time(smp_processor_id(),
-                                    &pcpu->time_in_idle_timestamp);
+               get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
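
For reference, the comment on cpufreq_interactive_timer_start() implies
the calling sequence used by the CPUFREQ_GOV_LIMITS path later in this
patch (a sketch, not part of this hunk):

	down_write(&pcpu->enable_sem);
	del_timer_sync(&pcpu->cpu_timer);
	del_timer_sync(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer_start(cpu);
	up_write(&pcpu->enable_sem);
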
@@ -323,7 +351,12 @@ static u64 update_load(int cpu)
        now_idle = get_cpu_idle_time(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
-       active_time = delta_time - delta_idle;
+
+       if (delta_time <= delta_idle)
+               active_time = 0;
+       else
+               active_time = delta_time - delta_idle;
+
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
 
        pcpu->time_in_idle = now_idle;
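
Both deltas above are truncated to unsigned int and come from separate
accounting (idle time can include iowait), so delta_idle can exceed
delta_time; without the new guard the subtraction wraps and feeds a
huge bogus active_time into cputime_speedadj. A standalone userspace
sketch of the wraparound, with illustrative values only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int delta_time = 100, delta_idle = 103;

		/* unguarded: wraps to 4294967293 with 32-bit unsigned int */
		printf("unguarded: %u\n", delta_time - delta_idle);

		/* guarded, as in the hunk above: clamps to zero */
		printf("guarded:   %u\n",
		       delta_time <= delta_idle ? 0 : delta_time - delta_idle);
		return 0;
	}
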
@@ -627,9 +660,19 @@ static int cpufreq_interactive_notifier(
                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
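+                       /*
+                        * enable_sem is already held for freq->cpu on this
+                        * path; take it read-only for the other CPUs in the
+                        * policy and skip any whose governor is stopping.
+                        */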
+                       if (cpu != freq->cpu) {
+                               if (!down_read_trylock(&pjcpu->enable_sem))
+                                       continue;
+                               if (!pjcpu->governor_enabled) {
+                                       up_read(&pjcpu->enable_sem);
+                                       continue;
+                               }
+                       }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+                       if (cpu != freq->cpu)
+                               up_read(&pjcpu->enable_sem);
                }
 
                up_read(&pcpu->enable_sem);
@@ -699,7 +742,7 @@ static ssize_t show_target_loads(
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");
 
-       ret += sprintf(buf + ret, "\n");
+       sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
 }
@@ -742,7 +785,7 @@ static ssize_t show_above_hispeed_delay(
                ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");
 
-       ret += sprintf(buf + ret, "\n");
+       sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
        return ret;
 }
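
In both show routines above, the loop emits a separator after every
value, so the buffer ends with a stray space ("85 1026000:90 ") and the
old code then appended the newline after it. Overwriting the final
separator with the newline fixes the output. A standalone sketch with
made-up target_loads values:

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		unsigned int target_loads[] = { 85, 1026000, 90 };
		int i, ret = 0;

		for (i = 0; i < 3; i++)
			ret += sprintf(buf + ret, "%u%s", target_loads[i],
				       i & 0x1 ? ":" : " ");

		/* buf now holds "85 1026000:90 "; replace the trailing
		 * separator with the newline, as the fix above does. */
		sprintf(buf + ret - 1, "\n");
		printf("%s", buf);	/* prints "85 1026000:90\n" */
		return 0;
	}
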
@@ -1041,8 +1084,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        hispeed_freq = policy->max;
 
                for_each_cpu(j, policy->cpus) {
-                       unsigned long expires;
-
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
@@ -1053,14 +1094,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
-                       expires = jiffies + usecs_to_jiffies(timer_rate);
-                       pcpu->cpu_timer.expires = expires;
-                       add_timer_on(&pcpu->cpu_timer, j);
-                       if (timer_slack_val >= 0) {
-                               expires += usecs_to_jiffies(timer_slack_val);
-                               pcpu->cpu_slack_timer.expires = expires;
-                               add_timer_on(&pcpu->cpu_slack_timer, j);
-                       }
+                       cpufreq_interactive_timer_start(j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }
@@ -1119,6 +1153,33 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+
+                       /* hold write semaphore to avoid race */
+                       down_write(&pcpu->enable_sem);
+                       if (pcpu->governor_enabled == 0) {
+                               up_write(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       /* update target_freq first */
+                       if (policy->max < pcpu->target_freq)
+                               pcpu->target_freq = policy->max;
+                       else if (policy->min > pcpu->target_freq)
+                               pcpu->target_freq = policy->min;
+
+                       /*
+                        * Reschedule the timers. Delete them first: if the
+                        * timer callback fails to take enable_sem, it
+                        * returns without re-arming the timer, and that
+                        * race could leave the timer stopped unexpectedly.
+                        */
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       cpufreq_interactive_timer_start(j);
+                       up_write(&pcpu->enable_sem);
+               }
                break;
        }
        return 0;