cpufreq: interactive: make common_tunables static
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        spinlock_t target_freq_lock; /* protects target_freq */
        unsigned int target_freq;
        unsigned int floor_freq;
        unsigned int max_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

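/*
 * target_loads and above_hispeed_delay are stored as flat arrays of
 * "value [freq:value ...]" pairs: even indices hold values, odd indices
 * hold the frequency (in kHz) at which the next value takes effect.
 * Illustrative example (numbers are not from the original source):
 * "85 1000000:90 1500000:99" targets 85% load below 1 GHz, 90% from
 * 1 GHz, and 99% from 1.5 GHz upward.
 */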
struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when load bursts (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can ramp
         * down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /*
         * The sample rate of the timer used to increase frequency.
         */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid timer races.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                    freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
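/*
 * Illustrative example (numbers are not from the original source): with
 * loadadjfreq = load% * cur_freq, a CPU running at 1,000,000 kHz and 45%
 * busy yields loadadjfreq = 45,000,000.  With a target load of 90, the
 * lowest frequency whose implied load stays at or below target is
 * 45,000,000 / 90 = 500,000 kHz, so the search below settles at the
 * lowest table frequency at or above 500,000 kHz.
 */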
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

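/*
 * Accumulate frequency-weighted active time: each sample adds
 * active_time * cur_freq to cputime_speedadj.  Dividing the accumulated
 * sum by elapsed wall time later yields the average load scaled by
 * frequency (loadadjfreq) used by the sampling timer below.
 */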
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

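/*
 * Per-CPU sampling timer (summary of the logic below): sample the load,
 * pick a new target frequency, apply the hispeed and floor-frequency
 * rules, then hand the CPU off to the speedchange thread if the target
 * changed.
 */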
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

        if (cpu_load >= tunables->go_hispeed_load || boosted) {
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
                if (new_freq > tunables->hispeed_freq &&
                                pcpu->target_freq < tunables->hispeed_freq)
                        new_freq = tunables->hispeed_freq;
        }

        if (pcpu->target_freq >= tunables->hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time <
                                tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

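/*
 * A single realtime kthread applies all frequency changes: CPUs mark
 * themselves in speedchange_cpumask and wake the thread, which then sets
 * each affected policy to the highest target_freq among that policy's
 * CPUs.
 */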
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags[2];
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;

                spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
                spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

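/*
 * Parse a space- or colon-separated list of unsigned ints: for example
 * (illustrative input) "85 1000000:90" yields the tokens {85, 1000000,
 * 90}.  The token count must be odd so that values and frequency
 * breakpoints alternate correctly.
 */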
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->timer_rate = val;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                          char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

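/*
 * Boost interface: writing a nonzero value to "boost" holds every CPU at
 * or above hispeed_freq until it is cleared; writing to "boostpulse"
 * boosts for boostpulse_duration_val microseconds and then lets speed
 * fall normally.
 */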
static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)                                     \
static ssize_t show_##file_name##_gov_sys                               \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return show_##file_name(common_tunables, buf);                  \
}                                                                       \
                                                                        \
static ssize_t show_##file_name##_gov_pol                               \
(struct cpufreq_policy *policy, char *buf)                              \
{                                                                       \
        return show_##file_name(policy->governor_data, buf);            \
}

#define store_gov_pol_sys(file_name)                                    \
static ssize_t store_##file_name##_gov_sys                              \
(struct kobject *kobj, struct attribute *attr, const char *buf,         \
        size_t count)                                                   \
{                                                                       \
        return store_##file_name(common_tunables, buf, count);          \
}                                                                       \
                                                                        \
static ssize_t store_##file_name##_gov_pol                              \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        return store_##file_name(policy->governor_data, buf, count);    \
}

#define show_store_gov_pol_sys(file_name)                               \
show_gov_pol_sys(file_name);                                            \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

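/*
 * With a single system-wide instance these attributes typically appear
 * under /sys/devices/system/cpu/cpufreq/interactive/; with per-policy
 * governors they appear under each policy's cpufreq/interactive/
 * directory instead (paths assume the usual cpufreq sysfs layout).
 */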
#define gov_sys_attr_rw(_name)                                          \
static struct global_attr _name##_gov_sys =                             \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)                                          \
static struct freq_attr _name##_gov_pol =                               \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)                                      \
        gov_sys_attr_rw(_name);                                         \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

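/*
 * Governor entry point.  The cpufreq core invokes this with
 * POLICY_INIT/POLICY_EXIT to allocate and free the tunables, START/STOP
 * to enable or disable the per-CPU timers, and LIMITS when policy->min
 * or policy->max changes.
 */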
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;
        unsigned long flags;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                policy->governor_data = tunables;
                if (!have_governor_per_policy()) {
                        common_tunables = tunables;
                        WARN_ON(cpufreq_get_global_kobject());
                }

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        policy->governor_data = NULL;
                        if (!have_governor_per_policy())
                                common_tunables = NULL;
                        return rc;
                }

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());

                        if (!have_governor_per_policy())
                                cpufreq_put_global_kobject();

                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        pcpu->max_freq = policy->max;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        down_read(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        spin_lock_irqsave(&pcpu->target_freq_lock, flags);
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
                        up_read(&pcpu->enable_sem);

                        /*
                         * Reschedule the timers only if policy->max was
                         * raised.  Delete the timers first; otherwise the
                         * timer callback may return without re-arming them
                         * when it fails to acquire the semaphore, a race
                         * that could leave the timer stopped unexpectedly.
                         */
                        if (policy->max > pcpu->max_freq) {
                                down_write(&pcpu->enable_sem);
                                del_timer_sync(&pcpu->cpu_timer);
                                del_timer_sync(&pcpu->cpu_slack_timer);
                                cpufreq_interactive_timer_start(tunables, j);
                                up_write(&pcpu->enable_sem);
                        }

                        pcpu->max_freq = policy->max;
                }
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

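/*
 * The slack timer's handler intentionally does nothing: unlike the
 * deferrable cpu_timer, it is a regular timer whose only job is to wake
 * the CPU from idle so the idle-exit path can re-evaluate the speed.
 */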
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-CPU timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                spin_lock_init(&pcpu->target_freq_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");