cpufreq: interactive: use deferrable timer by default
firefly-linux-kernel-4.4.55.git: drivers/cpufreq/cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
        "Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &now);
        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(now - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;
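
        /*
         * Worked example (illustrative, not from the original source):
         * with the default 20ms timer_rate, if the CPU spent 5ms of the
         * last 20ms in idle, then delta_time = 20000us and
         * delta_idle = 5000us, so
         * cpu_load = 100 * (20000 - 5000) / 20000 = 75.
         */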

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(now - pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq < hispeed_freq &&
                    hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            now - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = hispeed_freq * cpu_load / 100;
        }
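
        /*
         * Illustration (added commentary): with go_hispeed_load = 85, a
         * load of 90 on a CPU running below hispeed_freq first bumps only
         * to hispeed_freq; the speed may climb further, toward
         * policy->max * load / 100, only after above_hispeed_delay_val
         * has elapsed at hispeed_freq.  Below the threshold the target
         * scales linearly as hispeed_freq * load / 100.
         */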

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                         pcpu->target_freq, new_freq);
                        goto rearm;
                }
        }
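
        /*
         * Example (illustrative): with the default min_sample_time of
         * 80ms, a CPU that just validated its floor frequency rejects any
         * lower target for the next 80ms, smoothing out brief load dips.
         */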

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = now;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = now;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If governing speed in idle and already at min, cancel the
                 * timer if that CPU goes idle.  We don't need to re-evaluate
                 * speed until the next idle exit.
                 */
                if (governidle && pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer_pinned(&pcpu->cpu_timer,
                                 jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer_pinned(
                                &pcpu->cpu_timer,
                                jiffies + usecs_to_jiffies(timer_rate));
                }
        } else if (governidle) {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer_pinned(
                        &pcpu->cpu_timer,
                        jiffies + usecs_to_jiffies(timer_rate));
        }
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}
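
/*
 * Note (added commentary): target_freq is a per-CPU vote.  The frequency
 * actually applied above is the highest vote among all CPUs sharing the
 * policy, so one busy sibling keeps the whole cluster fast.
 */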

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}
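
/*
 * Usage sketch (not in the original source): userspace can hold a boost
 * with "echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boost" and
 * release it with "echo 0"; writing to ../boostpulse boosts once without
 * latching, after which speed decays normally per min_sample_time.
 */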

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};
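
/*
 * Tunables live under /sys/devices/system/cpu/cpufreq/interactive/.
 * Example (illustrative values, not from the original source): raise the
 * sampling interval to 50ms and require 90% load before jumping to
 * hispeed_freq:
 *
 *   echo 50000 > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *
 * timer_rate, min_sample_time and above_hispeed_delay are in usec.
 */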

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                        pcpu->cpu_timer.expires =
                                jiffies + usecs_to_jiffies(timer_rate);
                        add_timer_on(&pcpu->cpu_timer, j);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (governidle)
                        init_timer(&pcpu->cpu_timer);
                else
                        init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }
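
        /*
         * Per the commit subject, the per-CPU timer is deferrable by
         * default: on a NO_HZ idle CPU it does not force a wakeup just to
         * re-evaluate speed.  Loading with governidle=1 uses a regular
         * timer instead, so idle CPUs can still be woken to ramp down.
         */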

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");