/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
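/*
 * Number of policies currently using this governor.  The sysfs attribute
 * group and the input event handler are registered only for the first user
 * and removed again when the last one stops.
 */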
static atomic_t active_count = ATOMIC_INIT(0);
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime kthread handles frequency scaling up, workqueue handles scaling down */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;
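/*
 * Speed increases are handed to up_task, a SCHED_FIFO kthread woken directly
 * from the sampling timer, so raising the frequency is not delayed behind
 * other work; speed decreases are less latency-critical and are deferred to
 * the down_wq workqueue instead.
 */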
/* Hi speed to bump to from low speed when a load burst is detected (default: policy max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Boost pulse to hispeed on touchscreen input.
 */
static int input_boost_val;

struct cpufreq_interactive_inputopen {
	struct input_handle *handle;
	struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int)(now_idle - time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;
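	/*
	 * cpu_load is the percentage of wall time spent non-idle over the
	 * short-term sample, e.g. 5 ms idle in a 20 ms sample gives
	 * 100 * (20 - 5) / 20 = 75.
	 */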
	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time -
				    pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq <= pcpu->policy->min) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    pcpu->timer_run_time - pcpu->hispeed_validate_time
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(data, cpu_load,
					pcpu->target_freq, new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = pcpu->policy->max * cpu_load / 100;
	}

	if (new_freq <= hispeed_freq)
		pcpu->hispeed_validate_time = pcpu->timer_run_time;
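	/*
	 * hispeed_validate_time records the last time a target at or below
	 * hispeed_freq was requested; speeds above hispeed_freq are only
	 * allowed once above_hispeed_delay_val microseconds have passed
	 * since then.
	 */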
	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;
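	/*
	 * CPUFREQ_RELATION_H picks the highest table frequency at or below
	 * the requested target, so new_freq now refers to a real table entry.
	 */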
	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (pcpu->timer_run_time - pcpu->floor_validate_time
		    < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
				pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = pcpu->timer_run_time;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
			pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}
	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = pcpu->timer_run_time;

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}
rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}
}
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}
}
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!pcpu->governor_enabled)
				continue;

			mutex_lock(&set_speed_lock);

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			mutex_unlock(&set_speed_lock);
			trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		unsigned int j;
		unsigned int max_freq = 0;

		pcpu = &per_cpu(cpuinfo, cpu);
		if (!pcpu->governor_enabled)
			continue;

		mutex_lock(&set_speed_lock);

		for_each_cpu(j, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, j);

			if (pjcpu->target_freq > max_freq)
				max_freq = pjcpu->target_freq;
		}

		if (max_freq != pcpu->policy->cur)
			__cpufreq_driver_target(pcpu->policy, max_freq,
						CPUFREQ_RELATION_H);

		mutex_unlock(&set_speed_lock);
		trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
					       pcpu->policy->cur);
	}
}
static void cpufreq_interactive_boost(void)
{
	int i;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&up_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &up_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&up_cpumask_lock, flags);

	wake_up_process(up_task);
}
/*
 * Pulsed boost on input event raises CPUs to hispeed_freq and lets
 * usual algorithm of min_sample_time decide when to allow speed
 * to drop.
 */
static void cpufreq_interactive_input_event(struct input_handle *handle,
					    unsigned int type,
					    unsigned int code, int value)
{
	if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
		trace_cpufreq_interactive_boost("input");
		cpufreq_interactive_boost();
	}
}
static void cpufreq_interactive_input_open(struct work_struct *w)
{
	struct cpufreq_interactive_inputopen *io =
		container_of(w, struct cpufreq_interactive_inputopen,
			     inputopen_work);
	int error;

	error = input_open_device(io->handle);
	if (error)
		input_unregister_handle(io->handle);
}
static int cpufreq_interactive_input_connect(struct input_handler *handler,
					     struct input_dev *dev,
					     const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	pr_info("%s: connect to %s\n", __func__, dev->name);
	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq_interactive";

	error = input_register_handle(handle);
	if (error)
		goto err;

	inputopen.handle = handle;
	queue_work(down_wq, &inputopen.inputopen_work);
	return 0;
err:
	kfree(handle);
	return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
static const struct input_device_id cpufreq_interactive_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			    BIT_MASK(ABS_MT_POSITION_X) |
			    BIT_MASK(ABS_MT_POSITION_Y) },
	}, /* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			    BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	}, /* touchpad */
	{ },
};
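/*
 * Only touch-capable devices (multi-touch touchscreens and BTN_TOUCH
 * pointers) match the table above, so only touch input can trigger the
 * input boost.
 */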
static struct input_handler cpufreq_interactive_input_handler = {
	.event = cpufreq_interactive_input_event,
	.connect = cpufreq_interactive_input_connect,
	.disconnect = cpufreq_interactive_input_disconnect,
	.name = "cpufreq_interactive",
	.id_table = cpufreq_interactive_ids,
};
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);
static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
				     struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);
static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
				     struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);
static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);
static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
				struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);
static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	return sprintf(buf, "%u\n", input_boost_val);
}

static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	input_boost_val = val;
	return count;
}

define_one_global_rw(input_boost);
static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
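/*
 * "boost" holds all online CPUs at or above hispeed_freq until it is written
 * back to 0; "boostpulse" raises them once and then lets the normal
 * min_sample_time / load evaluation decide when speed may drop again.
 */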
static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&input_boost.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					&pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
		}

		if (!hispeed_freq)
			hispeed_freq = policy->max;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		rc = input_register_handler(&cpufreq_interactive_input_handler);
		if (rc)
			pr_warn("%s: failed to register input handler\n",
				__func__);
		break;
	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		input_unregister_handler(&cpufreq_interactive_input_handler);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		break;

	case CPUFREQ_GOV_LIMITS:
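		/* Keep the current speed within the new min/max limits. */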
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	if (val == IDLE_START)
		cpufreq_interactive_idle_start();
	else if (val == IDLE_END)
		cpufreq_interactive_idle_end();
	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
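		/* The timer callback gets the owning CPU number via cpu_timer.data. */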
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);
	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);
	mutex_init(&set_speed_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);
	INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");