/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

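/*
 * Per-CPU governor state: the sampling timer, the idle-time bookkeeping
 * used to compute load, and the most recently requested target frequency.
 * floor_validate_time and hispeed_validate_time record when the current
 * floor frequency and a raise above hispeed_freq were last validated.
 */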
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned long target_load = DEFAULT_TARGET_LOAD;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

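/*
 * Re-arm the per-CPU sampling timer one timer_rate interval from now,
 * pinned to this CPU, and restart the short-term idle-time sample.
 */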
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
}

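/*
 * Sampling timer: compute CPU load over the short-term sample window and
 * since the last frequency change, take the greater of the two, and pick
 * a new target frequency that would put load at target_load.  The chosen
 * speed is handed off to the speedchange task for the actual transition.
 */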
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(now - pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if ((cpu_load >= go_hispeed_load || boost_val) &&
	    pcpu->target_freq < hispeed_freq)
		new_freq = hispeed_freq;
	else
		new_freq = pcpu->policy->cur * cpu_load / target_load;

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = now;

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}

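/*
 * Idle-entry hook: if this CPU enters idle above the minimum speed, keep
 * the sampling timer running so an idle CPU cannot hold its policy (and
 * any sibling CPUs) at an elevated frequency indefinitely.
 */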
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

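/*
 * Idle-exit hook: make sure a load sample is taken soon after the CPU
 * goes busy again.  If the (deferrable, when !governidle) timer already
 * expired while we were idle, run the sampling function immediately
 * rather than waiting for the next tick.
 */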
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

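/*
 * SCHED_FIFO worker that performs the actual frequency transitions.  It
 * sleeps until a CPU is flagged in speedchange_cpumask, then drives each
 * flagged CPU's policy to the highest target_freq requested by any CPU
 * sharing that policy.
 */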
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
							   pcpu->target_freq,
							   pcpu->policy->cur);
		}
	}

	return 0;
}

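/*
 * Raise every online CPU that is below hispeed_freq up to hispeed_freq,
 * and reset its floor so speed cannot drop back below hispeed_freq for
 * at least min_sample_time.  Called for both "boost" and "boostpulse".
 */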
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

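/*
 * sysfs tunables, exported via cpufreq_global_kobject under the
 * "interactive" attribute group.
 */
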
static ssize_t show_target_load(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", target_load);
}

static ssize_t store_target_load(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	target_load = val;
	return count;
}

static struct global_attr target_load_attr =
	__ATTR(target_load, S_IRUGO | S_IWUSR,
		show_target_load, store_target_load);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
	&target_load_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

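/*
 * Dispatch Android's idle notifier events to the governor's idle entry
 * and exit hooks.
 */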
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

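/*
 * Governor entry point: GOV_START arms the per-CPU timers and, on first
 * use, registers the idle notifier and sysfs group; GOV_STOP reverses
 * that; GOV_LIMITS clamps the current speed into the new policy range.
 */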
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					     &pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

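/*
 * Module init: set tunable defaults, prepare each CPU's sampling timer
 * (deferrable unless "governidle" is set, so an idle CPU is not woken
 * just to sample), and spawn the speedchange worker before registering
 * the governor.
 */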
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");