/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;
/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;
/*
 * Boost pulse to hispeed on touchscreen input.
 */
static int input_boost_val;

struct cpufreq_interactive_inputopen {
	struct input_handle *handle;
	struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;
static struct workqueue_struct *inputopen_wq;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();
	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int)(now_idle - time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
	delta_time = (unsigned int)(pcpu->timer_run_time -
				    pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

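	/*
	 * Pick the new target: a load at or above go_hispeed_load (or an
	 * active boost) jumps to at least hispeed_freq; otherwise the target
	 * scales with load as a fraction of policy->max.
	 */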
	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq <= pcpu->policy->min) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    pcpu->timer_run_time - pcpu->hispeed_validate_time
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(data, cpu_load,
								 pcpu->target_freq,
								 new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = pcpu->policy->max * cpu_load / 100;
	}

	if (new_freq <= hispeed_freq)
		pcpu->hispeed_validate_time = pcpu->timer_run_time;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (pcpu->timer_run_time - pcpu->floor_validate_time
		    < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
						pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = pcpu->timer_run_time;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
						  pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = pcpu->timer_run_time;

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}
}
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}
}

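/*
 * The speedchange kthread applies queued frequency requests.  CPUs flag
 * themselves in speedchange_cpumask (under speedchange_cpumask_lock) and wake
 * the thread; for each flagged CPU it takes the highest target_freq across
 * the policy's CPUs and calls __cpufreq_driver_target() with it.
 */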
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();
			if (!pcpu->governor_enabled)
				continue;

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy, max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
				pcpu->target_freq, pcpu->policy->cur);
		}
	}

	return 0;
}
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			pcpu->hispeed_validate_time = pcpu->target_set_time;
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

/*
 * Pulsed boost on input event raises CPUs to hispeed_freq and lets
 * usual algorithm of min_sample_time decide when to allow speed
 * to drop.
 */
static void cpufreq_interactive_input_event(struct input_handle *handle,
					    unsigned int type,
					    unsigned int code, int value)
{
	if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
		trace_cpufreq_interactive_boost("input");
		cpufreq_interactive_boost();
	}
}
static void cpufreq_interactive_input_open(struct work_struct *w)
{
	struct cpufreq_interactive_inputopen *io =
		container_of(w, struct cpufreq_interactive_inputopen,
			     inputopen_work);
	int error;

	error = input_open_device(io->handle);
	if (error)
		input_unregister_handle(io->handle);
}
static int cpufreq_interactive_input_connect(struct input_handler *handler,
					     struct input_dev *dev,
					     const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	pr_info("%s: connect to %s\n", __func__, dev->name);
	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq_interactive";

	error = input_register_handle(handle);
	if (error)
		goto err;

	inputopen.handle = handle;
	queue_work(inputopen_wq, &inputopen.inputopen_work);
	return 0;
err:
	kfree(handle);
	return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
static const struct input_device_id cpufreq_interactive_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			    BIT_MASK(ABS_MT_POSITION_X) |
			    BIT_MASK(ABS_MT_POSITION_Y) },
	}, /* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			    BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	}, /* touchpad */
	{ },
};

static struct input_handler cpufreq_interactive_input_handler = {
	.event = cpufreq_interactive_input_event,
	.connect = cpufreq_interactive_input_connect,
	.disconnect = cpufreq_interactive_input_disconnect,
	.name = "cpufreq_interactive",
	.id_table = cpufreq_interactive_ids,
};

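/*
 * sysfs interface: the tunables below are exported in the "interactive"
 * attribute group under the global cpufreq kobject (typically visible as
 * /sys/devices/system/cpu/cpufreq/interactive).
 */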
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);
static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);
static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	return sprintf(buf, "%u\n", input_boost_val);
}

static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	input_boost_val = val;
	return count;
}

define_one_global_rw(input_boost);

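/*
 * "boost" holds all online CPUs at or above hispeed_freq for as long as it is
 * set; "boostpulse" raises them once and lets min_sample_time decide when
 * speed may drop again.
 */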
static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&input_boost.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

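/*
 * Governor entry point: CPUFREQ_GOV_START initializes per-CPU state and, on
 * first use, registers the sysfs group, input handler and idle notifier;
 * CPUFREQ_GOV_STOP tears that down; CPUFREQ_GOV_LIMITS clamps the current
 * speed to the new policy limits.
 */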
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					     &pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->hispeed_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		if (!hispeed_freq)
			hispeed_freq = policy->max;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		rc = input_register_handler(&cpufreq_interactive_input_handler);
		if (rc)
			pr_warn("%s: failed to register input handler\n",
				__func__);

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		break;
	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&inputopen.inputopen_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		input_unregister_handler(&cpufreq_interactive_input_handler);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;
	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	spin_lock_init(&speedchange_cpumask_lock);

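	/*
	 * The speedchange thread runs as SCHED_FIFO (MAX_RT_PRIO-1, see param
	 * above) so queued frequency changes are applied with minimal
	 * scheduling delay.
	 */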
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	inputopen_wq = create_workqueue("cfinteractive");
	if (!inputopen_wq)
		goto err_freetask;

	INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freetask:
	put_task_struct(speedchange_task);
	return -ENOMEM;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
	destroy_workqueue(inputopen_wq);
}

module_exit(cpufreq_interactive_exit);
MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");