cpufreq: interactive: fix compiling warnings
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when load bursts (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can
         * ramp down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /*
         * The sample rate of the timer used to increase frequency
         */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
                                                  cputime64_t *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = jiffies_to_usecs(cur_wall_time);

        return jiffies_to_usecs(idle_time);
}

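/*
 * With io_is_busy set, time spent waiting on I/O is treated as busy time
 * and is therefore not added back into the idle total returned here.
 */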
static inline cputime64_t get_cpu_idle_time(
        unsigned int cpu,
        cputime64_t *wall,
        bool io_is_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, wall);

        if (idle_time == -1ULL)
                idle_time = get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_is_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}

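/*
 * Re-arm the sampling timer on the local CPU and take a fresh idle-time
 * snapshot; mod_timer_pinned() keeps the deferrable timer on this CPU.
 */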
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall hold the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

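/*
 * target_loads and above_hispeed_delay are stored as flattened
 * "value freq:value freq:value ..." pairs: even indices hold values, odd
 * indices hold the frequency thresholds at which the next value takes
 * effect, e.g. "85 1500000:90" (the numbers here are only examples).
 */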
static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                    freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
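/*
 * The loop below re-evaluates the target load at each candidate frequency,
 * narrowing a [freqmin, freqmax] window until the chosen frequency stops
 * changing; with a monotonic target-load table this converges rather than
 * oscillating between table entries.
 */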
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

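/*
 * Snapshot idle/wall time for this CPU and accumulate busy time weighted
 * by the current frequency into cputime_speedadj; the sampling timer later
 * divides this by elapsed wall time to estimate load at a given speed.
 */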
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

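/*
 * Periodic sampling timer.  loadadjfreq is 100 * (busy time * current
 * frequency) / wall time, so cpu_load = loadadjfreq / target_freq is the
 * percent load normalized to the current target speed.  The result is
 * checked against go_hispeed_load, boost state, above_hispeed_delay and
 * the floor_freq/min_sample_time rules before a new speed is requested.
 */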
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

        if (cpu_load >= tunables->go_hispeed_load || boosted) {
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= tunables->hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index))
                goto rearm;

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time <
                                tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

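/*
 * The speedchange thread services CPUs flagged in speedchange_cpumask:
 * for each one it applies the highest target_freq among the CPUs sharing
 * the policy, so one policy-wide transition satisfies every sibling.
 */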
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

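/*
 * Raise all online CPUs to at least hispeed_freq and make it the floor
 * for min_sample_time, so a boost cannot be undone by the next sample.
 */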
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;

                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

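/*
 * Parse a "value freq:value ..." tunable string into an array of unsigned
 * ints, e.g. "85 1500000:90" -> {85, 1500000, 90} (values here are only
 * examples).  An even token count is rejected, since threshold pairs must
 * follow an initial value.
 */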
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->timer_rate = val;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                          char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
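/*
 * For example, show_store_gov_pol_sys(timer_rate) expands to
 * show/store_timer_rate_gov_sys() (global sysfs node, shared tunables)
 * and show/store_timer_rate_gov_pol() (per-policy sysfs node).
 */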
#define show_gov_pol_sys(file_name)                                     \
static ssize_t show_##file_name##_gov_sys                               \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return show_##file_name(common_tunables, buf);                  \
}                                                                       \
                                                                        \
static ssize_t show_##file_name##_gov_pol                               \
(struct cpufreq_policy *policy, char *buf)                              \
{                                                                       \
        return show_##file_name(policy->governor_data, buf);            \
}

#define store_gov_pol_sys(file_name)                                    \
static ssize_t store_##file_name##_gov_sys                              \
(struct kobject *kobj, struct attribute *attr, const char *buf,         \
        size_t count)                                                   \
{                                                                       \
        return store_##file_name(common_tunables, buf, count);          \
}                                                                       \
                                                                        \
static ssize_t store_##file_name##_gov_pol                              \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        return store_##file_name(policy->governor_data, buf, count);    \
}

#define show_store_gov_pol_sys(file_name)                               \
show_gov_pol_sys(file_name);                                            \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)                                          \
static struct global_attr _name##_gov_sys =                             \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)                                          \
static struct freq_attr _name##_gov_pol =                               \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)                                      \
        gov_sys_attr_rw(_name);                                         \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

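/*
 * Governor event handler.  Typical lifecycle: POLICY_INIT allocates the
 * tunables and sysfs group, START arms the per-CPU timers, LIMITS clamps
 * target_freq and restarts the timers, STOP disarms them, and POLICY_EXIT
 * tears the tunables down once the last user is gone.
 */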
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        return rc;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                policy->governor_data = tunables;
                if (!have_governor_per_policy())
                        common_tunables = tunables;

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());
                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        /* hold write semaphore to avoid race */
                        down_write(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_write(&pcpu->enable_sem);
                                continue;
                        }

                        /* update target_freq first */
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        /*
                         * Reschedule the timer.  Delete the timers here,
                         * else the timer callback may return without
                         * re-arming the timer when it fails to acquire
                         * the semaphore.  This race may cause the timer
                         * to stop unexpectedly.
                         */
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        up_write(&pcpu->enable_sem);
                }
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

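/*
 * The main sampling timer is deferrable so an idle CPU is not woken just
 * to sample load; the slack timer is a normal timer whose nop handler
 * exists only to wake the CPU so speed can be re-evaluated once
 * timer_slack_val has expired.
 */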
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");