/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/* The sample rate of the timer used to increase frequency */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};
/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);
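
/*
 * Re-arm the sampling timer on the local CPU: reset the idle/load accounting
 * baseline and push the (pinned) timer out by one timer_rate interval, plus
 * the optional slack timer when running above policy->min.
 */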
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
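
/*
 * above_hispeed_delay and target_loads are stored as flat arrays of
 * alternating values and frequency breakpoints (the value at even indices,
 * the frequency from which it applies at odd indices), so these lookups walk
 * the array two entries at a time until the given frequency falls below the
 * next breakpoint.
 */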
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}
static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
			freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return ret;
}
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
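
/*
 * Accumulate "active time * current frequency" since the last sample so the
 * timer can later convert it into a load normalized to the target frequency.
 * Returns the current timestamp.
 */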
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
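
/*
 * Per-CPU sampling timer: compute the CPU load over the last interval, pick
 * a new target frequency (bumping to hispeed_freq on load bursts or boosts),
 * enforce the above_hispeed_delay and min_sample_time/floor_freq constraints,
 * and hand the CPU off to the speedchange thread if the target changed.
 */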
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	tunables->boosted = tunables->boost_val ||
		now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->target_freq < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
		    tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}
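
/*
 * Frequency changes are applied from a dedicated SCHED_FIFO kthread: for each
 * CPU queued in speedchange_cpumask it applies the highest target_freq among
 * the CPUs sharing that CPU's policy.
 */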
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
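
/*
 * Boost: push every online CPU governed by these tunables to at least
 * hispeed_freq, queue any raised CPU for the speedchange thread, and reset
 * the floor frequency and its validation time.
 */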
static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}
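
/*
 * CPUFREQ_POSTCHANGE notifier: after a frequency transition, fold the elapsed
 * active time into the load accounting (update_load()) of every CPU in the
 * changed policy so the accumulated speed-adjusted time stays consistent.
 */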
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};
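
/*
 * Parse a space/colon separated list of unsigned integers (as used by the
 * target_loads and above_hispeed_delay tunables).  The token count must be
 * odd; returns a kmalloc'd array or an ERR_PTR on failure.
 */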
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
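
/*
 * target_loads is shown/stored in the form "load [freq:load ...]", e.g.
 * "85 1500000:90 1700000:99" means a target load of 85% below 1.5 GHz,
 * 90% from 1.5 GHz, and 99% from 1.7 GHz (frequencies in kHz; values here
 * are only an example).
 */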
static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}
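
/*
 * above_hispeed_delay uses the same "delay [freq:delay ...]" pairing as
 * target_loads: the delay (in usecs) before raising speed further applies
 * from the paired frequency upwards.
 */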
static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}
static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}
/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,	\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
gov_sys_attr_rw(_name);							\
gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};
/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
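
/*
 * Governor event handler: allocates/frees the tunables on POLICY_INIT/EXIT
 * (shared through common_tunables in the single-instance case), creates and
 * removes the sysfs group, and starts/stops the per-CPU timers on
 * GOV_START/STOP.  GOV_LIMITS clamps the current and per-CPU target
 * frequencies to the new policy bounds.
 */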
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timer only if policy->max is raised.
			 * Delete the timers first, else the timer callback may
			 * return without re-arming the timer if it fails to
			 * acquire the semaphore; that race can leave the timer
			 * stopped unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
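
/*
 * Module init: set up the (deferrable) per-CPU timers and locks, then spawn
 * the SCHED_FIFO speedchange kthread before registering the governor.
 */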
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);
MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");