cpufreq: interactive: always limit initial speed bump to hispeed
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (default: policy max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

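/*
 * Per-CPU sampling timer: compute the CPU load over the last sample
 * window and since the last speed change, pick a new target frequency
 * from the policy's frequency table, and wake the speedchange task to
 * apply it.
 */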
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int)(now_idle - time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
        delta_time = (unsigned int)(pcpu->timer_run_time -
                                    pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq < hispeed_freq &&
                    hispeed_freq < pcpu->policy->max) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            pcpu->timer_run_time - pcpu->hispeed_validate_time
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = pcpu->policy->max * cpu_load / 100;
        }

        if (new_freq <= hispeed_freq)
                pcpu->hispeed_validate_time = pcpu->timer_run_time;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (pcpu->timer_run_time - pcpu->floor_validate_time
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                         pcpu->target_freq, new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

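/*
 * Idle-entry hook.  If this CPU is above min speed, keep the sampling
 * timer armed so an idle CPU cannot hold its siblings above min
 * indefinitely; if at min, cancel a timer that was armed only to catch
 * a sudden return to busy.
 */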
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }
}

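/*
 * Idle-exit hook.  Start a fresh idle sample and re-arm the timer,
 * unless the timer function is still processing the previous interval.
 */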
static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }
}

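/*
 * Main loop of the realtime speedchange thread: sleep until a CPU is
 * flagged in speedchange_cpumask, then drive each flagged CPU's policy
 * to the highest target_freq among the CPUs that policy covers.
 */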
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

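/*
 * Bump all online CPUs to at least hispeed_freq and reset the floor so
 * speed will not be ramped back down for min_sample_time.
 */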
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        pcpu->hispeed_validate_time = pcpu->target_set_time;
                        anyboost = 1;
                }

                /*
                 * Set the floor frequency and reset the time it was last
                 * validated.
                 */
                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

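/* sysfs tunables, exported in the "interactive" attribute group. */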
static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        u64 val;

        ret = kstrtoull(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

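/*
 * boostpulse: one-shot boost to hispeed_freq; speed may ramp back down
 * once min_sample_time has elapsed.
 */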
static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

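/* Dispatch idle-start/idle-end notifications to the hooks above. */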
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

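/*
 * Governor callback: on START, initialize per-CPU state and register
 * the idle hook and sysfs group (first user only); on STOP, tear them
 * down; on LIMITS, clamp the current speed to the new policy bounds.
 */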
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->hispeed_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

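/*
 * Module init: apply default tunables, set up the per-CPU timers,
 * spawn the SCHED_FIFO speedchange thread, and register the governor.
 */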
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");