/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/* The sample rate of the timer used to reevaluate CPU load. */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

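/*
 * above_hispeed_delay holds delay values at even indices and frequency
 * boundaries at odd indices; return the delay entry that applies to the
 * band containing the given frequency.
 */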
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;
	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	for (i = 0; i < tunables->ntarget_loads - 1 &&
			freq >= tunables->target_loads[i+1]; i += 2)
		;
	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX;
	unsigned int tl;
	unsigned int index;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

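/*
 * Accumulate (active time * current frequency) into cputime_speedadj so the
 * sampling timer can later derive a load figure that does not depend on the
 * frequency the CPU happened to run at during the sample window.
 */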
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

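/*
 * Sampling timer: compute the load since the last sample, pick a new target
 * frequency and, if it changed, hand the switch off to the speedchange task.
 */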
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	tunables->boosted = tunables->boost_val ||
			    now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

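/* Idle notifier callbacks: re-evaluate speed around CPU idle entry and exit. */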
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

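/*
 * Realtime worker thread: for each CPU flagged in speedchange_cpumask, apply
 * the highest target_freq among the CPUs sharing that CPU's policy.
 */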
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;
			u64 hvt = ~0ULL;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq) {
					max_freq = pjcpu->target_freq;
					hvt = pjcpu->loc_hispeed_val_time;
				} else if (pjcpu->target_freq == max_freq) {
					hvt = min(hvt, pjcpu->loc_hispeed_val_time);
				}
			}

			if (max_freq != pcpu->policy->cur) {
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->pol_hispeed_val_time = hvt;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

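/* Bump every online CPU governed by these tunables to at least hispeed_freq. */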
static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);

			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

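/*
 * Parse a space/colon separated list of unsigned ints (as used by the
 * target_loads and above_hispeed_delay tunables); the token count must be odd.
 */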
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");
	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");
	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->timer_rate = val_round;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
	return show_##file_name(common_tunables, buf); \
} \
 \
static ssize_t show_##file_name##_gov_pol \
(struct cpufreq_policy *policy, char *buf) \
{ \
	return show_##file_name(policy->governor_data, buf); \
}

#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, const char *buf, \
	size_t count) \
{ \
	return store_##file_name(common_tunables, buf, count); \
} \
 \
static ssize_t store_##file_name##_gov_pol \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
	return store_##file_name(policy->governor_data, buf, count); \
}

#define show_store_gov_pol_sys(file_name) \
show_gov_pol_sys(file_name); \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name) \
static struct global_attr _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name) \
static struct freq_attr _name##_gov_pol = \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name) \
	gov_sys_attr_rw(_name); \
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

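/*
 * Governor entry point: handle the cpufreq core's POLICY_INIT/POLICY_EXIT,
 * START, STOP and LIMITS events for this policy.
 */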
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy()) {
				common_tunables = NULL;
				cpufreq_put_global_kobject();
			}
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->pol_hispeed_val_time =
				pcpu->floor_validate_time;
			pcpu->loc_hispeed_val_time = pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/* Reschedule the timer only if policy->max is raised.
			 * Delete the timers first, else the timer callback may
			 * return without rearming the timer when it fails to
			 * acquire the semaphore; that race could leave the
			 * timer stopped unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");