/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load.  Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/* The sample rate of the timer used to increase frequency */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	int io_is_busy;
};

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;

	*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall,
					    bool io_is_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		/* NO_HZ idle accounting unavailable: fall back to jiffies */
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
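
/*
 * Note on io_is_busy (descriptive summary, not in the original comments):
 * when io_is_busy is 0, time a CPU spent waiting on I/O (iowait) is added
 * to its idle time above, so I/O-bound work does not raise the computed
 * load; when io_is_busy is 1, iowait is left out of idle time and thus
 * counts as busy, driving the frequency up under I/O-heavy workloads.
 */
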
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}
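
/*
 * Illustrative example (not from the original source): with
 * above_hispeed_delay set to "20000 1500000:50000" the array is
 * {20000, 1500000, 50000}, so the scan above returns a delay of
 * 20000 usecs for target frequencies below 1.5 GHz and 50000 usecs
 * at or above it.
 */
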
static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
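
/*
 * Illustrative example (not from the original source): target_loads
 * "85 1000000:90 1700000:97" is stored as {85, 1000000, 90, 1700000, 97}.
 * Loads sit at even indices and the frequency at which the next load
 * takes effect at odd indices, which is why both lookup loops above
 * step by two.
 */
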
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
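
/*
 * Worked example (illustrative, with a hypothetical frequency table):
 * suppose the CPU runs at 1000000 kHz under 60% load, so loadadjfreq =
 * 60 * 1000000 = 60000000.  With a target load of 90 the first estimate
 * is 60000000 / 90 = 666666 kHz, which CPUFREQ_RELATION_L rounds up to
 * the next table entry, say 800000 kHz.  The loop then repeats using the
 * target load configured for 800000 kHz, narrowing [freqmin, freqmax]
 * until the chosen frequency stops changing.
 */
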
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
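
/*
 * Accounting sketch (illustrative): cputime_speedadj accumulates
 * active_time * cur_freq, i.e. busy time weighted by the speed it ran at.
 * The timer below divides by wall time and multiplies by 100, so 10 ms
 * busy at 1000000 kHz over a 20 ms window yields loadadjfreq =
 * 100 * (10000 * 1000000) / 20000 = 50000000, i.e. 50% load at 1 GHz.
 */
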
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
		goto rearm;

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
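
/*
 * Summary of the sampling path above (descriptive note, not in the
 * original comments): each timer tick computes load over the last window
 * and picks hispeed_freq or choose_freq() as the candidate speed.  Two
 * rate limits then apply: above_hispeed_delay must elapse before raising
 * speed past hispeed_freq, and min_sample_time must elapse before dropping
 * below the validated floor.  Only then is the CPU queued for the
 * speedchange thread.
 */
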
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			/*
			 * Scale to the highest target frequency among all
			 * CPUs sharing this policy's clock.
			 */
			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
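
/*
 * Illustrative example (not from the original source): the input
 * "85 1000000:90" tokenizes to {85, 1000000, 90} with ntokens = 3.
 * An even token count such as "85 1000000" is rejected with -EINVAL,
 * since valid input is always one value followed by freq:value pairs.
 */
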
static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}
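
/*
 * Usage sketch (illustrative; the exact sysfs path depends on whether the
 * governor is instantiated per policy or system-wide):
 *
 *   echo "85 1000000:90 1700000:97" > \
 *       /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * asks for an 85% target load below 1 GHz, 90% up to 1.7 GHz, and 97%
 * above that.
 */
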
static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}
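
/*
 * Usage sketch (illustrative): writing any value to the boostpulse
 * attribute, e.g.
 *
 *   echo 1 > boostpulse
 *
 * raises every online CPU to at least hispeed_freq and holds that floor
 * for boostpulse_duration usecs, after which normal load-based scaling
 * resumes.
 */
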
static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
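
/*
 * Expansion sketch (illustrative): show_store_gov_pol_sys(timer_rate)
 * generates four wrappers, show_timer_rate_gov_sys(),
 * show_timer_rate_gov_pol(), store_timer_rate_gov_sys() and
 * store_timer_rate_gov_pol(), routing the global-kobject and per-policy
 * sysfs entry points to the same show/store helpers with the right
 * tunables instance.
 */
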
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
gov_sys_attr_rw(_name);							\
gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per-policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());
			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			/* hold write semaphore to avoid race */
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				continue;
			}

			/* update target_freq first */
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			/*
			 * Reschedule the timer.  Delete the timers first;
			 * otherwise the timer callback may return without
			 * re-arming the timer if it fails to acquire the
			 * semaphore, and that race can leave the timer
			 * stopped unexpectedly.
			 */
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			up_write(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");