/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

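/*
 * Per-CPU governor state.  The sampling timers, accumulated load
 * statistics and the most recently requested target frequency live
 * here; load_lock and target_freq_lock protect the fields noted below,
 * while enable_sem guards against the timers racing with governor
 * start/stop.
 */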
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/*
	 * Hi speed to bump up to from lo speed on a load burst
	 * (defaults to the policy max).
	 */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

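/*
 * Reset this CPU's load statistics and re-arm its pinned sampling timer
 * one timer_rate from now; if a slack timeout is configured and we are
 * running above the policy minimum, also arm the slack timer so the CPU
 * is woken from idle to re-evaluate (and possibly reduce) speed.
 */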
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid racing
 * with the timers.  The cpu_timer and cpu_slack_timer must be
 * deactivated when calling this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

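/*
 * above_hispeed_delay and target_loads are stored as flattened arrays
 * of "value [freq:value ...]" pairs sorted by ascending frequency:
 * even indices hold values, odd indices hold the frequency at which the
 * next value takes effect.  For example, the sysfs input
 * "20000 1500000:50000" means a 20 ms delay by default and 50 ms once
 * at or above 1.5 GHz.  The lookups below scan for the range that
 * contains @freq.
 */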
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

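/*
 * Return the target load for the range containing @freq (same table
 * format as above).
 */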
static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

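/*
 * Fold the time since the last sample into this CPU's load statistics:
 * active (non-idle) time is weighted by the current frequency and
 * accumulated into cputime_speedadj, which the sampling timer later
 * divides by elapsed time to obtain a frequency-adjusted load.  The
 * caller must hold load_lock.  Returns the current timestamp.
 */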
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

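/*
 * Per-CPU sampling timer.  Computes the frequency-adjusted load over
 * the last sample period, applies the hispeed/boost heuristics and the
 * floor_freq/min_sample_time hold-off, then hands any new target speed
 * to the speedchange task.
 */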
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	tunables->boosted = tunables->boost_val ||
			    now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
				pcpu->target_freq < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq &&
			pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

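/*
 * Idle notifier hooks.  On idle entry, make sure a sampling timer is
 * pending if this CPU is running above the policy minimum, so an idle
 * CPU cannot hold its siblings at an elevated speed indefinitely.  On
 * idle exit, re-arm the timer or, if it has already expired, run the
 * sampling path immediately.
 */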
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

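/*
 * Speed-change worker: an RT kthread that sleeps until the sampling
 * timer flags CPUs in speedchange_cpumask, then drives each flagged
 * policy to the highest target_freq requested by any CPU it contains.
 */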
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

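/*
 * Immediately bump every online CPU governed by @tunables to at least
 * hispeed_freq, setting the floor to hispeed_freq and restarting the
 * floor hold-off timestamp, then kick the speedchange task if any
 * target actually changed.
 */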
static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

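/*
 * cpufreq transition notifier: after any frequency change, fold the
 * elapsed load into every CPU sharing the affected policy so the next
 * sample is weighted against the correct speed.
 */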
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

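/*
 * Parse a space/colon separated list of unsigned ints, as written to
 * the target_loads and above_hispeed_delay sysfs files.  The token
 * count must be odd ("value [freq:value ...]"); returns a kmalloc'd
 * array (caller frees) or an ERR_PTR on failure.
 */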
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

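/*
 * Write-only trigger for a temporary boost lasting
 * boostpulse_duration_val usecs.  With the system-wide governor
 * instance this is typically driven by writing 1 to
 * /sys/devices/system/cpu/cpufreq/interactive/boostpulse (the path
 * differs when have_governor_per_policy() is true).
 */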
static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per-policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

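/*
 * Governor event handler.  POLICY_INIT/POLICY_EXIT allocate and free
 * the tunables and sysfs groups (shared system-wide unless
 * have_governor_per_policy()), START/STOP arm and tear down the per-CPU
 * timers under enable_sem, and LIMITS clamps target frequencies to the
 * new policy bounds.
 */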
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy()) {
				common_tunables = NULL;
				cpufreq_put_global_kobject();
			}
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timer only if policy->max has been
			 * raised.  Delete the timers first; otherwise the
			 * timer callback may fail to take the semaphore and
			 * return without re-arming itself, a race that can
			 * leave the timer stopped unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

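/*
 * The slack timer needs no handler body: it exists only to wake the CPU
 * from idle, at which point the idle-exit notifier re-evaluates speed.
 */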
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");