/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);
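
/*
 * target_loads is a flattened array of alternating target-load and
 * frequency values: {load0, freq0, load1, freq1, ..., loadN}.  load0
 * applies below freq0; each subsequent load applies at or above the
 * preceding frequency.  For example (illustrative values),
 * {85, 1000000, 90} means: target 85% load below 1000000 kHz and 90%
 * at or above it.
 */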

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
	"Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
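
/*
 * Example (standard cpufreq sysfs layout assumed): select this governor
 * for a CPU with
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */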

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	mod_timer_pinned(&pcpu->cpu_timer,
			 jiffies + usecs_to_jiffies(timer_rate));
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock(&target_loads_lock);
	return ret;
}
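
/*
 * With the illustrative table {85, 1000000, 90} described above, a query
 * for 800000 kHz stops at index 0 and returns 85; a query for 1200000 kHz
 * advances past the 1000000 kHz break and returns 90.
 */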

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int curload)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int loadadjfreq = freq * curload;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
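
/*
 * Worked example (illustrative numbers): at cur = 1000000 kHz and
 * curload = 45, loadadjfreq = 45000000.  With a target load of 90, the
 * candidate is 45000000 / 90 = 500000 kHz, so choose_freq() picks the
 * lowest table frequency at or above 500000 kHz and iterates until the
 * choice is stable.
 */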

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	now_idle = get_cpu_idle_time_us(data, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	if ((cpu_load >= go_hispeed_load || boost_val) &&
	    pcpu->target_freq < hispeed_freq)
		new_freq = hispeed_freq;
	else
		new_freq = choose_freq(pcpu, cpu_load);

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = now;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If governing speed in idle and already at min, cancel the
		 * timer if that CPU goes idle.  We don't need to re-evaluate
		 * speed until the next idle exit.
		 */
		if (governidle && pcpu->target_freq == pcpu->policy->min)
			pcpu->timer_idlecancel = 1;

		cpufreq_interactive_timer_resched(pcpu);
	}

exit:
	return;
}
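
/*
 * Example of the load window above (illustrative numbers): with
 * timer_rate = 20000 us, a CPU that was idle for 5000 us of the window
 * yields cpu_load = 100 * (20000 - 5000) / 20000 = 75.
 */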

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->timer_idlecancel = 0;
			cpufreq_interactive_timer_resched(pcpu);
		}
	} else if (governidle) {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		pcpu->timer_idlecancel = 0;
		cpufreq_interactive_timer_resched(pcpu);
	} else if (!governidle &&
		   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			/* Scale the policy to the max target among its CPUs. */
			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;

	spin_lock(&target_loads_lock);

	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock(&target_loads_lock);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	spin_lock(&target_loads_lock);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock(&target_loads_lock);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);
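
/*
 * Example (illustrative values; sysfs path derived from the attribute
 * group name below):
 *   echo "85 1000000:90" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
 * requests an 85% target load below 1000000 kHz and 90% at or above it.
 */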

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
				struct attribute *attr, const char *buf,
				size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);
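
/*
 * Example (sysfs path assumed as above):
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boost
 * holds all online CPUs at or above hispeed_freq until a write of 0
 * clears the boost.
 */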

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
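
/*
 * Example: userspace (e.g. an input-event handler) can request a
 * transient ramp to hispeed_freq with
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 * (write-only attribute; sysfs path assumed as above).
 */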

static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
			pcpu->cpu_timer.expires =
				jiffies + usecs_to_jiffies(timer_rate);
			add_timer_on(&pcpu->cpu_timer, j);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);
		}

		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (governidle)
			init_timer(&pcpu->cpu_timer);
		else
			init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");