/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when a load burst occurs (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

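/*
 * Note: all of the tunables above are exposed via sysfs while the governor
 * is active; the attribute group registered below is named "interactive"
 * and lives under the cpufreq global kobject. An illustrative tuning
 * session (values are examples only, not recommendations):
 *
 *   echo 1200000 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
 *   echo 95 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 */
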
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
        unsigned long flags;

        mod_timer_pinned(&pcpu->cpu_timer, expires);
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&target_loads_lock, flags);

        for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
                ;

        ret = target_loads[i];
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
}

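/*
 * Illustrative target_loads layout (values hypothetical): the array
 * alternates load and frequency, so "85 1000000:90 1500000:99" parses to
 * {85, 1000000, 90, 1500000, 99}, and freq_to_targetload() returns 85
 * below 1000000 kHz, 90 from 1000000 kHz, and 99 from 1500000 kHz; the
 * i += 2 stride skips over the frequency entries.
 */
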
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
        struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                cpufreq_frequency_table_target(
                        pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                        CPUFREQ_RELATION_L, &index);
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmax - 1, CPUFREQ_RELATION_H,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmin + 1, CPUFREQ_RELATION_L,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

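/*
 * Worked example, assuming a hypothetical 300000/600000/1000000 kHz table
 * and a flat target load of 90: for loadadjfreq == 50000000 (see the load
 * accounting below), the first pass picks the lowest table frequency >=
 * 50000000 / 90 ~= 555556 kHz, i.e. 600000 kHz. The second pass maps
 * 600000 kHz to the same target load and chooses the same frequency, so
 * freq == prevfreq and the loop exits; freqmin/freqmax only come into play
 * when different frequency bands carry different target loads.
 */
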
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time_us(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
        active_time = delta_time - delta_idle;
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

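/*
 * Example of the accounting above, with assumed numbers: a CPU active for
 * 10000 of the last 20000 us at 1000000 kHz adds 10000 * 1000000 to
 * cputime_speedadj. When the sampling timer divides by the 20000 us
 * window, the average busy speed is 500000 kHz; scaled by 100 that is
 * loadadjfreq == 50000000, i.e. 50% load at 1 GHz or 100% load at 500 MHz.
 */
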
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = boost_val || now < boostpulse_endtime;

        if (cpu_load >= go_hispeed_load || boosted) {
                if (pcpu->target_freq < hispeed_freq) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

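/*
 * The POSTCHANGE hook above re-samples every CPU in the policy at the
 * moment the frequency changes, so the time spent at the old speed is
 * credited to cputime_speedadj at that old speed rather than being
 * misattributed to the new frequency at the next timer tick.
 */
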
static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&target_loads_lock, flags);

        for (i = 0; i < ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");

        ret += sprintf(buf + ret, "\n");
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        const char *cp;
        unsigned int *new_target_loads = NULL;
        int ntokens = 1;
        int i;
        unsigned long flags;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err_inval;

        new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!new_target_loads) {
                ret = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
                        goto err_inval;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_inval;

        spin_lock_irqsave(&target_loads_lock, flags);
        if (target_loads != default_target_loads)
                kfree(target_loads);
        target_loads = new_target_loads;
        ntarget_loads = ntokens;
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return count;

err_inval:
        ret = -EINVAL;
err:
        kfree(new_target_loads);
        return ret;
}

static struct global_attr target_loads_attr =
        __ATTR(target_loads, S_IRUGO | S_IWUSR,
                show_target_loads, store_target_loads);

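/*
 * Example write (path as created at governor start; values hypothetical):
 *
 *   echo "85 1500000:90 1700000:99" > \
 *       /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * An even token count (a trailing frequency with no following load) is
 * rejected with -EINVAL by the parser above.
 */
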
static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                               struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                                struct attribute *attr, const char *buf,
                                size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        timer_slack_val = val;
        return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_duration_val = val;
        return count;
}

define_one_global_rw(boostpulse_duration);

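/*
 * Sketch of typical boost usage from userspace (e.g. an input event
 * handler; values are examples only):
 *
 *   echo 80000 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * A pulse raises every online CPU to at least hispeed_freq until
 * boostpulse_endtime passes; writing 1 to "boost" instead holds the floor
 * indefinitely until 0 is written.
 */
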
static struct attribute *interactive_attributes[] = {
        &target_loads_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &timer_slack.attr,
        &boost.attr,
        &boostpulse.attr,
        &boostpulse_duration.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        unsigned long expires;

                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        expires = jiffies + usecs_to_jiffies(timer_rate);
                        pcpu->cpu_timer.expires = expires;
                        add_timer_on(&pcpu->cpu_timer, j);
                        if (timer_slack_val >= 0) {
                                expires += usecs_to_jiffies(timer_slack_val);
                                pcpu->cpu_slack_timer.expires = expires;
                                add_timer_on(&pcpu->cpu_slack_timer, j);
                        }
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                cpufreq_register_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                cpufreq_unregister_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-CPU timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

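/*
 * Once registered, the governor is selected per policy through the
 * standard cpufreq interface, e.g.:
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */
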
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");