cpufreq: interactive: apply above_hispeed_delay to each step above hispeed
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned long target_load = DEFAULT_TARGET_LOAD;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
        "Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

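/*
 * Re-arm the per-CPU sampling timer one timer_rate interval from now and
 * snapshot this CPU's cumulative idle time, which the timer handler uses
 * as the start of the next short-term load sample.
 */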
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        mod_timer_pinned(&pcpu->cpu_timer,
                         jiffies + usecs_to_jiffies(timer_rate));
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
}

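/*
 * Sampling timer handler: estimate load over the last interval as the
 * non-idle fraction of wall time, take the greater of that short-term
 * load and the load since the last speed change, then pick a new target
 * frequency: bump a CPU below hispeed_freq straight to hispeed_freq on a
 * load burst (load >= go_hispeed_load, or an active boost), otherwise
 * scale the current frequency by load relative to target_load.
 */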
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        now_idle = get_cpu_idle_time_us(data, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(now - pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if ((cpu_load >= go_hispeed_load || boost_val) &&
            pcpu->target_freq < hispeed_freq)
                new_freq = hispeed_freq;
        else
                new_freq = pcpu->policy->cur * cpu_load / target_load;

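        /*
         * Once at or above hispeed_freq, hold each frequency for at
         * least above_hispeed_delay before stepping up again.  Because
         * hispeed_validate_time is refreshed below on every evaluation
         * that passes this check, the delay applies to each step above
         * hispeed, not just the first.
         */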
        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = now;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = now;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If governing speed in idle and already at min, cancel the
                 * timer if that CPU goes idle.  We don't need to re-evaluate
                 * speed until the next idle exit.
                 */
                if (governidle && pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;

                cpufreq_interactive_timer_resched(pcpu);
        }

exit:
        return;
}

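/*
 * Idle notifier hook, IDLE_START: keep a timer running if this CPU
 * enters idle above the minimum speed, so an idle CPU cannot hold the
 * other CPUs in its policy above min indefinitely.
 */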
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->timer_idlecancel = 0;
                        cpufreq_interactive_timer_resched(pcpu);
                }
        } else if (governidle) {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        pcpu->timer_idlecancel = 0;
                }
        }
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->timer_idlecancel = 0;
                cpufreq_interactive_timer_resched(pcpu);
        } else if (!governidle &&
                   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }
}

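/*
 * Frequency-scaling kthread: sleeps until a CPU is flagged in
 * speedchange_cpumask, then drives each flagged CPU's policy to the
 * highest target_freq among the CPUs sharing that policy.  Runs as
 * SCHED_FIFO (see cpufreq_interactive_init) so speed changes are not
 * delayed behind normal tasks.
 */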
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

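/*
 * Raise every online CPU below hispeed_freq to hispeed_freq, and set
 * hispeed_freq as the floor so min_sample_time must elapse before the
 * speed can ramp back down; wakes the speedchange task if any CPU's
 * target actually changed.
 */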
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Refresh the floor frequency and the time it was last
                 * validated, even for CPUs already at or above hispeed.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static ssize_t show_target_load(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", target_load);
}

static ssize_t store_target_load(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        target_load = val;
        return count;
}

static struct global_attr target_load_attr =
        __ATTR(target_load, S_IRUGO | S_IWUSR,
                show_target_load, store_target_load);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
        &target_load_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

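/*
 * The attribute group above is created under cpufreq_global_kobject, so
 * the tunables normally appear under
 * /sys/devices/system/cpu/cpufreq/interactive/.  For example (delay
 * tunables are in usecs, hispeed_freq in kHz):
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/target_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/above_hispeed_delay
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */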
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

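/*
 * Governor callback: on GOV_START, initialize per-CPU state and start
 * the sampling timers for every CPU in the policy; the idle notifier
 * and sysfs group are registered only by the first policy to start.
 * GOV_STOP reverses this, and GOV_LIMITS clamps the current frequency
 * into the new [min, max] policy range.
 */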
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                        pcpu->cpu_timer.expires =
                                jiffies + usecs_to_jiffies(timer_rate);
                        add_timer_on(&pcpu->cpu_timer, j);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (governidle)
                        init_timer(&pcpu->cpu_timer);
                else
                        init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");