static void cpufreq_interactive_timer_resched(
struct cpufreq_interactive_cpuinfo *pcpu)
{
- unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+ unsigned long expires;
unsigned long flags;
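+
+	/* Restart the load sampling window and rearm the timers under load_lock. */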
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ expires = jiffies + usecs_to_jiffies(timer_rate);
mod_timer_pinned(&pcpu->cpu_timer, expires);
+
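+	/* Arm the slack timer only if the frequency can still be lowered. */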
if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
expires += usecs_to_jiffies(timer_slack_val);
mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
}
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/*
+ * The caller must hold the enable_sem write semaphore to avoid timer races.
+ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
+ * function.
+ */
+static void cpufreq_interactive_timer_start(int cpu)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+ unsigned long flags;
+
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, cpu);
+ if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, cpu);
+ }
+
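+	/* Initialize the load sampling state for this CPU. */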
spin_lock_irqsave(&pcpu->load_lock, flags);
pcpu->time_in_idle =
- get_cpu_idle_time(smp_processor_id(),
- &pcpu->time_in_idle_timestamp);
+ get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
pcpu->cputime_speedadj = 0;
pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
spin_unlock_irqrestore(&pcpu->load_lock, flags);
* than or equal to the target load.
*/
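+	/* On lookup failure, keep the previous choice and stop searching. */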
- cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
- CPUFREQ_RELATION_L, &index);
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index))
+ break;
freq = pcpu->freq_table[index].frequency;
if (freq > prevfreq) {
* Find the highest frequency that is less
* than freqmax.
*/
- cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmax - 1, CPUFREQ_RELATION_H,
- &index);
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index))
+ break;
freq = pcpu->freq_table[index].frequency;
if (freq == freqmin) {
* Find the lowest frequency that is higher
* than freqmin.
*/
- cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmin + 1, CPUFREQ_RELATION_L,
- &index);
+ if (cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index))
+ break;
freq = pcpu->freq_table[index].frequency;
/*
now_idle = get_cpu_idle_time(cpu, &now);
delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
- active_time = delta_time - delta_idle;
+
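+	/*
+	 * delta_idle can exceed delta_time; clamp active_time to zero to
+	 * avoid unsigned underflow.
+	 */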
+ if (delta_time <= delta_idle)
+ active_time = 0;
+ else
+ active_time = delta_time - delta_idle;
+
pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
pcpu->time_in_idle = now_idle;
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_L,
- &index)) {
- pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
- (int) data);
+ &index))
goto rearm;
- }
new_freq = pcpu->freq_table[index].frequency;
for_each_cpu(cpu, pcpu->policy->cpus) {
struct cpufreq_interactive_cpuinfo *pjcpu =
&per_cpu(cpuinfo, cpu);
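+
+		/*
+		 * enable_sem for freq->cpu is already read-held by this
+		 * notifier; take it for the other CPUs in the policy and
+		 * skip any whose governor is not enabled.
+		 */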
+ if (cpu != freq->cpu) {
+ if (!down_read_trylock(&pjcpu->enable_sem))
+ continue;
+ if (!pjcpu->governor_enabled) {
+ up_read(&pjcpu->enable_sem);
+ continue;
+ }
+ }
spin_lock_irqsave(&pjcpu->load_lock, flags);
update_load(cpu);
spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ if (cpu != freq->cpu)
+ up_read(&pjcpu->enable_sem);
}
up_read(&pcpu->enable_sem);
ret += sprintf(buf + ret, "%u%s", target_loads[i],
i & 0x1 ? ":" : " ");
- ret += sprintf(buf + ret, "\n");
+	sprintf(buf + ret - 1, "\n");
spin_unlock_irqrestore(&target_loads_lock, flags);
return ret;
}
ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
i & 0x1 ? ":" : " ");
- ret += sprintf(buf + ret, "\n");
+	sprintf(buf + ret - 1, "\n");
spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
return ret;
}
hispeed_freq = policy->max;
for_each_cpu(j, policy->cpus) {
- unsigned long expires;
-
pcpu = &per_cpu(cpuinfo, j);
pcpu->policy = policy;
pcpu->target_freq = policy->cur;
pcpu->hispeed_validate_time =
pcpu->floor_validate_time;
down_write(&pcpu->enable_sem);
- expires = jiffies + usecs_to_jiffies(timer_rate);
- pcpu->cpu_timer.expires = expires;
- add_timer_on(&pcpu->cpu_timer, j);
- if (timer_slack_val >= 0) {
- expires += usecs_to_jiffies(timer_slack_val);
- pcpu->cpu_slack_timer.expires = expires;
- add_timer_on(&pcpu->cpu_slack_timer, j);
- }
+ cpufreq_interactive_timer_start(j);
pcpu->governor_enabled = 1;
up_write(&pcpu->enable_sem);
}
else if (policy->min > policy->cur)
__cpufreq_driver_target(policy,
policy->min, CPUFREQ_RELATION_L);
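+
+		/*
+		 * Clamp each CPU's target_freq to the new limits and restart
+		 * its sampling timers while holding enable_sem.
+		 */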
+ for_each_cpu(j, policy->cpus) {
+ pcpu = &per_cpu(cpuinfo, j);
+
+			/* hold write semaphore to avoid races with the timer handler */
+ down_write(&pcpu->enable_sem);
+			if (!pcpu->governor_enabled) {
+ up_write(&pcpu->enable_sem);
+ continue;
+ }
+
+			/* update target_freq first */
+ if (policy->max < pcpu->target_freq)
+ pcpu->target_freq = policy->max;
+ else if (policy->min > pcpu->target_freq)
+ pcpu->target_freq = policy->min;
+
+			/*
+			 * Reschedule the timers. Delete them first, otherwise
+			 * the timer callback may return without rearming them
+			 * if it fails to acquire the semaphore, leaving the
+			 * timers stopped unexpectedly.
+			 */
+ del_timer_sync(&pcpu->cpu_timer);
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer_start(j);
+ up_write(&pcpu->enable_sem);
+ }
break;
}
return 0;