cpufreq: interactive: fix show_target_loads and show_above_hispeed_delay
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_interactive.c
index 3dc067e6eb45b758e8f9d5bcbb3ef469afdf61ee..a66748e13b38d9577f21078dd885a515f2712758 100644
@@ -169,19 +169,47 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
 static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
 {
-       unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+       unsigned long expires;
        unsigned long flags;
 
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(smp_processor_id(),
+                                    &pcpu->time_in_idle_timestamp);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       expires = jiffies + usecs_to_jiffies(timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);
+
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }
 
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/* The caller must hold the enable_sem write semaphore to avoid any timer
+ * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
+ * this function.
+ */
+static void cpufreq_interactive_timer_start(int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+       unsigned long flags;
+
+       pcpu->cpu_timer.expires = expires;
+       add_timer_on(&pcpu->cpu_timer, cpu);
+       if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(timer_slack_val);
+               pcpu->cpu_slack_timer.expires = expires;
+               add_timer_on(&pcpu->cpu_slack_timer, cpu);
+       }
+
        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
-               get_cpu_idle_time(smp_processor_id(),
-                                    &pcpu->time_in_idle_timestamp);
+               get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
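
Both cpufreq_interactive_timer_resched() and the new cpufreq_interactive_timer_start()
now (re)arm the main sample timer timer_rate microseconds out, arm an optional slack
timer when the CPU runs above policy->min, and reset the per-CPU load baseline
(time_in_idle, its timestamp, cputime_speedadj) under load_lock. A rough userspace
sketch of the expiry arithmetic only, with an assumed HZ and illustrative tunable
values (the usecs_to_jiffies() macro below is a simplified stand-in, not the kernel's):

#include <stdio.h>

#define HZ 100	/* assumed tick rate for this sketch */
/* rough stand-in for the kernel's usecs_to_jiffies(), rounding up */
#define usecs_to_jiffies(us) (((us) * HZ + 999999UL) / 1000000UL)

int main(void)
{
	unsigned long jiffies = 1000;		/* pretend current jiffy count */
	unsigned long timer_rate = 20000;	/* illustrative sample period, usec */
	unsigned long timer_slack_val = 80000;	/* illustrative slack, usec */
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);

	printf("cpu_timer expires at jiffy %lu\n", expires);		/* 1002 */

	/* the slack timer is only armed when target_freq > policy->min */
	expires += usecs_to_jiffies(timer_slack_val);
	printf("cpu_slack_timer expires at jiffy %lu\n", expires);	/* 1010 */
	return 0;
}
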
@@ -246,9 +274,10 @@ static unsigned int choose_freq(
                 * than or equal to the target load.
                 */
 
-               cpufreq_frequency_table_target(
-                       pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
-                       CPUFREQ_RELATION_L, &index);
+               if (cpufreq_frequency_table_target(
+                           pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+                           CPUFREQ_RELATION_L, &index))
+                       break;
                freq = pcpu->freq_table[index].frequency;
 
                if (freq > prevfreq) {
@@ -260,10 +289,11 @@ static unsigned int choose_freq(
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
-                               cpufreq_frequency_table_target(
-                                       pcpu->policy, pcpu->freq_table,
-                                       freqmax - 1, CPUFREQ_RELATION_H,
-                                       &index);
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmax - 1, CPUFREQ_RELATION_H,
+                                           &index))
+                                       break;
                                freq = pcpu->freq_table[index].frequency;
 
                                if (freq == freqmin) {
@@ -286,10 +316,11 @@ static unsigned int choose_freq(
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
-                               cpufreq_frequency_table_target(
-                                       pcpu->policy, pcpu->freq_table,
-                                       freqmin + 1, CPUFREQ_RELATION_L,
-                                       &index);
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmin + 1, CPUFREQ_RELATION_L,
+                                           &index))
+                                       break;
                                freq = pcpu->freq_table[index].frequency;
 
                                /*
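
All three lookups in choose_freq() now break out of the search loop when
cpufreq_frequency_table_target() returns an error, instead of reading whatever
happened to be left in index. A self-contained sketch of the same guard pattern,
with a toy lookup standing in for the cpufreq helper (hypothetical function and
values, not the kernel API):

#include <stdio.h>

/* toy stand-in for a RELATION_L lookup: lowest entry >= target, -1 if none */
static int table_target_low(const unsigned int *table, int len,
			    unsigned int target, int *index)
{
	int i;

	for (i = 0; i < len; i++) {
		if (table[i] >= target) {
			*index = i;
			return 0;
		}
	}
	return -1;	/* nothing suitable: caller must not touch *index */
}

int main(void)
{
	const unsigned int freqs[] = { 300000, 600000, 1026000, 1700000 };
	const unsigned int wanted[] = { 500000, 1200000, 2000000 };
	int i, index;

	for (i = 0; i < 3; i++) {
		/* mirror choose_freq(): bail out rather than use a stale index */
		if (table_target_low(freqs, 4, wanted[i], &index))
			break;
		printf("%u -> %u\n", wanted[i], freqs[index]);
	}
	return 0;
}
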
@@ -320,7 +351,12 @@ static u64 update_load(int cpu)
        now_idle = get_cpu_idle_time(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
-       active_time = delta_time - delta_idle;
+
+       if (delta_time <= delta_idle)
+               active_time = 0;
+       else
+               active_time = delta_time - delta_idle;
+
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
 
        pcpu->time_in_idle = now_idle;
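
The new clamp matters because delta_time, delta_idle and active_time are all
unsigned: if get_cpu_idle_time() reports a larger idle delta than the elapsed wall
time (clock granularity or a change in io_is_busy accounting can skew the two), the
old subtraction wrapped around to a huge bogus active_time. A minimal userspace
demonstration of the wrap and the clamp, with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned int delta_time = 5, delta_idle = 7;	/* idle delta > wall delta */
	unsigned int wrapped = delta_time - delta_idle;	/* unsigned wrap-around */
	unsigned int clamped = (delta_time <= delta_idle)
				? 0 : delta_time - delta_idle;

	printf("unclamped active_time: %u\n", wrapped);	/* 4294967294 */
	printf("clamped active_time:   %u\n", clamped);	/* 0 */
	return 0;
}
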
@@ -377,7 +413,7 @@ static void cpufreq_interactive_timer(unsigned long data)
        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
-           freq_to_above_hispeed_delay(pcpu->policy->cur)) {
+           freq_to_above_hispeed_delay(pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
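
Evaluating freq_to_above_hispeed_delay() at pcpu->target_freq ties the hold-off to
what this CPU last requested rather than policy->cur, which may not match the
request (a sibling CPU in the same policy can push the shared current frequency
higher). For reference, the helper indexes the flat "delay freq:delay ..." pair
array that the show/store handlers below use; a simplified userspace sketch of that
lookup, with no locking, illustrative table values, and the pair layout assumed
from the tunable format:

#include <stdio.h>

/* flat pair array: delay, freq:delay, freq:delay ... (usec and kHz) */
static const unsigned int above_hispeed_delay[] = {
	20000, 1026000, 50000, 1700000, 80000
};
static const int nabove_hispeed_delay = 5;

static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;

	/* advance while the next anchor frequency is still <= freq */
	for (i = 0; i < nabove_hispeed_delay - 1 &&
		     freq >= above_hispeed_delay[i + 1]; i += 2)
		;
	return above_hispeed_delay[i];
}

int main(void)
{
	printf("%u\n", freq_to_above_hispeed_delay(600000));	/* 20000 */
	printf("%u\n", freq_to_above_hispeed_delay(1026000));	/* 50000 */
	printf("%u\n", freq_to_above_hispeed_delay(1700000));	/* 80000 */
	return 0;
}
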
@@ -388,11 +424,8 @@ static void cpufreq_interactive_timer(unsigned long data)
 
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
-                                          &index)) {
-               pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
-                            (int) data);
+                                          &index))
                goto rearm;
-       }
 
        new_freq = pcpu->freq_table[index].frequency;
 
@@ -627,9 +660,19 @@ static int cpufreq_interactive_notifier(
                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
+                       if (cpu != freq->cpu) {
+                               if (!down_read_trylock(&pjcpu->enable_sem))
+                                       continue;
+                               if (!pjcpu->governor_enabled) {
+                                       up_read(&pjcpu->enable_sem);
+                                       continue;
+                               }
+                       }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+                       if (cpu != freq->cpu)
+                               up_read(&pjcpu->enable_sem);
                }
 
                up_read(&pcpu->enable_sem);
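
Taking each sibling CPU's enable_sem with a read trylock before touching its load
state closes a race with a concurrent governor stop on that CPU; freq->cpu itself is
already covered by the enable_sem taken earlier in the notifier, hence the
cpu != freq->cpu checks. A userspace analogue of the trylock-and-check guard, using
a POSIX rwlock in place of the kernel rw_semaphore (hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t enable_sem = PTHREAD_RWLOCK_INITIALIZER;
static int governor_enabled = 1;

/* skip a sibling CPU whose governor is stopping or already stopped */
static void update_sibling_load(int cpu)
{
	if (pthread_rwlock_tryrdlock(&enable_sem))
		return;				/* writer holds it: stop in progress */
	if (!governor_enabled) {
		pthread_rwlock_unlock(&enable_sem);
		return;
	}
	printf("update_load(cpu %d)\n", cpu);	/* per-CPU state cannot vanish here */
	pthread_rwlock_unlock(&enable_sem);
}

int main(void)
{
	update_sibling_load(1);
	return 0;
}
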
@@ -699,7 +742,7 @@ static ssize_t show_target_loads(
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");
 
-       ret += sprintf(buf + ret, "\n");
+       ret += sprintf(buf + --ret, "\n");
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
 }
@@ -742,7 +785,7 @@ static ssize_t show_above_hispeed_delay(
                ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");
 
-       ret += sprintf(buf + ret, "\n");
+       ret += sprintf(buf + --ret, "\n");
        spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
        return ret;
 }
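
Both show handlers print every entry followed by a separator (' ' or ':'), so the
output used to end in a stray space before the newline. The new one-liner backs up
over that trailing separator and writes the '\n' in its place, so the sysfs output
ends cleanly and ret still counts the terminating newline. A standalone
demonstration with an illustrative target_loads table (the two-statement form below
has the same net effect as the patch's ret += sprintf(buf + --ret, ...)):

#include <stdio.h>

int main(void)
{
	/* illustrative "load freq:load" table: 85 by default, 90 above 1026000 kHz */
	unsigned int target_loads[] = { 85, 1026000, 90 };
	char buf[64];
	int ret = 0, i;

	/* mirror the show handler: odd indices are frequencies, hence ':' */
	for (i = 0; i < 3; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	/* back up over the trailing separator, then append the newline */
	ret--;
	ret += sprintf(buf + ret, "\n");

	printf("len=%d buf=%s", ret, buf);	/* "85 1026000:90\n", no stray space */
	return 0;
}
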
@@ -1041,8 +1084,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        hispeed_freq = policy->max;
 
                for_each_cpu(j, policy->cpus) {
-                       unsigned long expires;
-
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
@@ -1053,14 +1094,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
-                       expires = jiffies + usecs_to_jiffies(timer_rate);
-                       pcpu->cpu_timer.expires = expires;
-                       add_timer_on(&pcpu->cpu_timer, j);
-                       if (timer_slack_val >= 0) {
-                               expires += usecs_to_jiffies(timer_slack_val);
-                               pcpu->cpu_slack_timer.expires = expires;
-                               add_timer_on(&pcpu->cpu_slack_timer, j);
-                       }
+                       cpufreq_interactive_timer_start(j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }
@@ -1119,6 +1153,33 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+
+                       /* hold write semaphore to avoid race */
+                       down_write(&pcpu->enable_sem);
+                       if (pcpu->governor_enabled == 0) {
+                               up_write(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       /* update target_freq first */
+                       if (policy->max < pcpu->target_freq)
+                               pcpu->target_freq = policy->max;
+                       else if (policy->min > pcpu->target_freq)
+                               pcpu->target_freq = policy->min;
+
+                       /* Reschedule timers.
+                        * Delete the timers first, else the timer callback
+                        * may return without rearming the timer when it
+                        * fails to acquire the semaphore. This race may
+                        * leave the timer stopped unexpectedly.
+                        */
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       cpufreq_interactive_timer_start(j);
+                       up_write(&pcpu->enable_sem);
+               }
                break;
        }
        return 0;
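
The new CPUFREQ_GOV_LIMITS loop first clamps each CPU's pending target_freq into the
new [policy->min, policy->max] window, then deletes both timers before calling
cpufreq_interactive_timer_start(), since the timer callback can bail out without
rearming when it loses the enable_sem race. The clamp itself is simple; a small
sketch with illustrative bounds (hypothetical helper, not the kernel code):

#include <stdio.h>

struct limits { unsigned int min, max; };	/* stand-in for policy->min/max */

/* mirror the GOV_LIMITS loop: pull a stale request back inside the bounds */
static unsigned int clamp_target(unsigned int target, const struct limits *l)
{
	if (l->max < target)
		return l->max;
	if (l->min > target)
		return l->min;
	return target;
}

int main(void)
{
	struct limits l = { .min = 600000, .max = 1026000 };

	printf("%u\n", clamp_target(1700000, &l));	/* 1026000 */
	printf("%u\n", clamp_target(300000, &l));	/* 600000 */
	printf("%u\n", clamp_target(800000, &l));	/* 800000 */
	return 0;
}
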
@@ -1148,6 +1209,7 @@ static int __init cpufreq_interactive_init(void)
 
        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
+       spin_lock_init(&above_hispeed_delay_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,