cpufreq: interactive: trace actual speed in target speed decisions
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

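/*
 * Per-CPU governor state: the sampling timer, idle-time snapshots used
 * to compute load, the most recently requested target frequency, and
 * the floor frequency below which we will not scale until it has been
 * validated for min_sample_time.
 */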
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (defaults to policy max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
        "Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

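/*
 * Re-arm the per-CPU sampling timer one timer_rate from now and take a
 * fresh snapshot of this CPU's cumulative idle time, which the timer
 * handler compares against to compute the short-term load.
 */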
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        mod_timer_pinned(&pcpu->cpu_timer,
                         jiffies + usecs_to_jiffies(timer_rate));
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
}

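/*
 * Sampling timer: compute CPU load over the last sample window, pick a
 * new target frequency, and hand the change off to speedchange_task.
 * Load is taken as the greater of the short-term load (since the timer
 * was last re-armed) and the long-term load (since the target speed was
 * last changed), so a long-running busy period is not masked by a
 * single mostly-idle sample.
 */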
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        now_idle = get_cpu_idle_time_us(data, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

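        /*
         * Example: with the default 20ms window, delta_time = 20000us
         * and delta_idle = 5000us give cpu_load = 100 * 15000 / 20000
         * = 75%.  The same ratio is computed below over the longer
         * window since the last target-speed change.
         */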
        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(now - pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

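        /*
         * At or above go_hispeed_load (or while boosted), first jump to
         * hispeed_freq; once already there, speeds above hispeed_freq
         * are load-proportional but deferred until the CPU has spent
         * above_hispeed_delay_val at hispeed_freq.  Below the threshold,
         * scale hispeed_freq by the measured load.
         */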
        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq < hispeed_freq &&
                    hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            now - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(
                                        data, cpu_load, pcpu->target_freq,
                                        pcpu->policy->cur, new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = hispeed_freq * cpu_load / 100;
        }

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = now;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = now;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If governing speed in idle and already at min, cancel the
                 * timer if that CPU goes idle.  We don't need to re-evaluate
                 * speed until the next idle exit.
                 */
                if (governidle && pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;

                cpufreq_interactive_timer_resched(pcpu);
        }

exit:
        return;
}

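/*
 * Idle-entry hook: if this CPU is above the policy minimum, make sure a
 * sampling timer is pending so an idle CPU cannot pin its siblings at a
 * high shared speed; if governing in idle at minimum speed, mark the
 * timer for cancellation instead.
 */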
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->timer_idlecancel = 0;
                        cpufreq_interactive_timer_resched(pcpu);
                }
        } else if (governidle) {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        pcpu->timer_idlecancel = 0;
                }
        }
}

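/*
 * Idle-exit hook: re-arm the sampling timer, or, when not governing in
 * idle, run an already-expired timer immediately so the speed is
 * re-evaluated as soon as the CPU goes busy.
 */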
static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->timer_idlecancel = 0;
                cpufreq_interactive_timer_resched(pcpu);
        } else if (!governidle &&
                   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }
}

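/*
 * Frequency-change worker: an RT kthread that sleeps until the timer or
 * boost path flags CPUs in speedchange_cpumask, then drives each flagged
 * CPU's policy to the highest target_freq requested by any CPU sharing
 * that policy.
 */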
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

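/*
 * Boost: immediately raise every online CPU below hispeed_freq to
 * hispeed_freq and set it as the floor, so the min_sample_time rule
 * holds speeds there until the boost has had its effect.
 */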
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

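/* Sysfs tunables, exported as the "interactive" group on the global cpufreq kobject. */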
static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

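/*
 * Route kernel idle notifications into the governor's idle entry/exit
 * handlers above.
 */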
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

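/*
 * Governor callback: on GOV_START, initialize per-CPU state and start
 * the sampling timers, registering the sysfs group and idle notifier on
 * first use; on GOV_STOP, tear the same down; on GOV_LIMITS, clamp the
 * current speed into the new policy range.
 */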
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                        pcpu->cpu_timer.expires =
                                jiffies + usecs_to_jiffies(timer_rate);
                        add_timer_on(&pcpu->cpu_timer, j);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

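/*
 * Module init: apply default tunables, set up the per-CPU timers
 * (deferrable unless governidle, so sampling does not itself wake idle
 * CPUs), and spawn the SCHED_FIFO speedchange thread before registering
 * the governor.
 */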
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (governidle)
                        init_timer(&pcpu->cpu_timer);
                else
                        init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");