/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load.  Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/* The sample rate of the timer used to reevaluate CPU load and speed. */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};
/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall,
					    bool io_is_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
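/*
 * When io_is_busy is clear, time spent waiting on I/O is counted as idle,
 * allowing speed to drop during I/O waits; when set, iowait counts as busy
 * time, keeping speed up for I/O-bound loads.  get_cpu_idle_time_us()
 * returns -1ULL when fine-grained idle accounting is unavailable, in which
 * case we fall back to the jiffy-granularity estimate above.
 */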
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
		    freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}
static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	return ret;
}
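/*
 * target_loads and above_hispeed_delay are flat arrays of the form
 * { value, freq, value, freq, ..., value } (always an odd element count):
 * the first value applies below the first freq, the next value from that
 * freq upward, and so on, which is why the lookups above step by two.
 */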
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
				unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
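/*
 * Worked example (illustrative numbers): a CPU that was 45% busy at
 * 1000000 kHz accumulates loadadjfreq = 45 * 1000000 (load percentage
 * times frequency).  Against a 90% target load the first candidate is the
 * lowest table frequency >= 45000000 / 90 = 500000 kHz; the loop then
 * re-evaluates with that frequency's own target load until the chosen
 * frequency stops changing.
 */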
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
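/*
 * Sampling timer: computes the average load over the last interval and
 * picks a new target speed.  cputime_speedadj divided by wall time is the
 * average busy-cycle rate in kHz, so loadadjfreq below is load percentage
 * times frequency, and dividing it by target_freq yields load as a
 * percentage of the currently targeted speed.
 */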
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->target_freq < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
		    tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
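/*
 * The speedchange task applies pending target_freq requests from process
 * context.  For each flagged CPU it programs the highest target_freq among
 * all CPUs sharing the policy, since those CPUs scale together.  It runs
 * as SCHED_FIFO (see cpufreq_interactive_init()) so ramp-ups are not
 * delayed behind normal-priority work.
 */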
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);

			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}
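/*
 * Illustrative usage (values and sysfs path are examples; the exact path
 * depends on whether the governor is instantiated per-policy):
 *
 *   echo "85 1500000:90" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * would target 85% load below 1.5 GHz and 90% load at or above it.
 */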
static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}
static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}
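/*
 * above_hispeed_delay accepts the same "value [freq:value] ..." syntax,
 * with values in usecs.  For example (illustrative), "20000 1500000:50000"
 * would wait 20 ms before raising speed above hispeed_freq, and 50 ms once
 * running at or above 1.5 GHz.
 */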
static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}
/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
	show_gov_pol_sys(file_name);					\
	store_gov_pol_sys(file_name)
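/*
 * For example, show_store_gov_pol_sys(timer_rate) expands to four
 * wrappers: show_timer_rate_gov_sys()/store_timer_rate_gov_sys() bound to
 * the system-wide common_tunables, and show_timer_rate_gov_pol()/
 * store_timer_rate_gov_pol() bound to the per-policy governor_data.
 */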
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};
/* Per-policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};
static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());
			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timers only if policy->max is
			 * raised.  Delete the timers first: otherwise the
			 * timer callback may return without re-arming the
			 * timer if it fails to acquire the semaphore, and
			 * that race could leave the timer stopped
			 * unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
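/*
 * Note: the cpufreq core compares a driver's transition latency against
 * max_transition_latency (10 ms here) and will decline to run this
 * governor on drivers that switch speeds more slowly than that.
 */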
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);
MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");