1 /*
2  * drivers/cpufreq/cpufreq_interactive.c
3  *
4  * Copyright (C) 2010 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * Author: Mike Chan (mike@android.com)
16  *
17  */
18
19 #include <linux/cpu.h>
20 #include <linux/cpumask.h>
21 #include <linux/cpufreq.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/rwsem.h>
25 #include <linux/sched.h>
26 #include <linux/sched/rt.h>
27 #include <linux/tick.h>
28 #include <linux/time.h>
29 #include <linux/timer.h>
30 #include <linux/workqueue.h>
31 #include <linux/kthread.h>
32 #include <linux/slab.h>
33 #include "cpufreq_governor.h"
34
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/cpufreq_interactive.h>
37
38 struct cpufreq_interactive_cpuinfo {
39         struct timer_list cpu_timer;
40         struct timer_list cpu_slack_timer;
41         spinlock_t load_lock; /* protects the next 4 fields */
42         u64 time_in_idle;
43         u64 time_in_idle_timestamp;
44         u64 cputime_speedadj;
45         u64 cputime_speedadj_timestamp;
46         struct cpufreq_policy *policy;
47         struct cpufreq_frequency_table *freq_table;
48         spinlock_t target_freq_lock; /* protects target_freq */
49         unsigned int target_freq;
50         unsigned int floor_freq;
51         unsigned int max_freq;
52         u64 floor_validate_time;
53         u64 hispeed_validate_time;
54         struct rw_semaphore enable_sem;
55         int governor_enabled;
56 };
57
58 static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
59
60 /* realtime thread handles frequency scaling */
61 static struct task_struct *speedchange_task;
62 static cpumask_t speedchange_cpumask;
63 static spinlock_t speedchange_cpumask_lock;
64 static struct mutex gov_lock;
65
66 /* Target load.  Lower values result in higher CPU speeds. */
67 #define DEFAULT_TARGET_LOAD 90
68 static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
69
70 #define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
71 #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
72 static unsigned int default_above_hispeed_delay[] = {
73         DEFAULT_ABOVE_HISPEED_DELAY };
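
/*
 * Table layout shared by above_hispeed_delay and target_loads (example
 * values below are illustrative, not defaults): the tuned string
 * "20000 1000000:40000 1700000:80000" is stored flattened as
 * { 20000, 1000000, 40000, 1700000, 80000 }.  The first value applies
 * below the first frequency; each later value applies at or above the
 * frequency written before it, so this example means a 20 ms delay
 * below 1.0 GHz, 40 ms from 1.0 GHz, and 80 ms from 1.7 GHz up.
 */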
74
75 struct cpufreq_interactive_tunables {
76         int usage_count;
77         /* Hi speed to bump to from lo speed on a load burst (default max) */
78         unsigned int hispeed_freq;
79         /* Go to hi speed when CPU load at or above this value. */
80 #define DEFAULT_GO_HISPEED_LOAD 99
81         unsigned long go_hispeed_load;
82         /* Target load. Lower values result in higher CPU speeds. */
83         spinlock_t target_loads_lock;
84         unsigned int *target_loads;
85         int ntarget_loads;
86         /*
87          * The minimum amount of time to spend at a frequency before we can ramp
88          * down.
89          */
90 #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
91         unsigned long min_sample_time;
92         /*
93          * The sample rate of the timer used to increase frequency
94          */
95         unsigned long timer_rate;
96         /*
97          * Wait this long before raising speed above hispeed, by default a
98          * single timer interval.
99          */
100         spinlock_t above_hispeed_delay_lock;
101         unsigned int *above_hispeed_delay;
102         int nabove_hispeed_delay;
103         /* Non-zero means indefinite speed boost active */
104         int boost_val;
105         /* Duration of a boost pulse in usecs */
106         int boostpulse_duration_val;
107         /* End time of boost pulse in ktime converted to usecs */
108         u64 boostpulse_endtime;
109         bool boosted;
110         /*
111          * Max additional time to wait in idle, beyond timer_rate, at speeds
112          * above minimum before wakeup to reduce speed, or -1 if unnecessary.
113          */
114 #define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
115         int timer_slack_val;
116         bool io_is_busy;
117 };
118
119 /* For cases where we have a single governor instance for the whole system */
120 static struct cpufreq_interactive_tunables *common_tunables;
121
122 static struct attribute_group *get_sysfs_attr(void);
123
124 static void cpufreq_interactive_timer_resched(
125         struct cpufreq_interactive_cpuinfo *pcpu)
126 {
127         struct cpufreq_interactive_tunables *tunables =
128                 pcpu->policy->governor_data;
129         unsigned long expires;
130         unsigned long flags;
131
132         spin_lock_irqsave(&pcpu->load_lock, flags);
133         pcpu->time_in_idle =
134                 get_cpu_idle_time(smp_processor_id(),
135                                   &pcpu->time_in_idle_timestamp,
136                                   tunables->io_is_busy);
137         pcpu->cputime_speedadj = 0;
138         pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
139         expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
140         mod_timer_pinned(&pcpu->cpu_timer, expires);
141
142         if (tunables->timer_slack_val >= 0 &&
143             pcpu->target_freq > pcpu->policy->min) {
144                 expires += usecs_to_jiffies(tunables->timer_slack_val);
145                 mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
146         }
147
148         spin_unlock_irqrestore(&pcpu->load_lock, flags);
149 }
150
151 /* The caller shall take the enable_sem write semaphore to avoid any timer
152  * race.  The cpu_timer and cpu_slack_timer must be deactivated when
153  * calling this function.
154  */
155 static void cpufreq_interactive_timer_start(
156         struct cpufreq_interactive_tunables *tunables, int cpu)
157 {
158         struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
159         unsigned long expires = jiffies +
160                 usecs_to_jiffies(tunables->timer_rate);
161         unsigned long flags;
162
163         pcpu->cpu_timer.expires = expires;
164         add_timer_on(&pcpu->cpu_timer, cpu);
165         if (tunables->timer_slack_val >= 0 &&
166             pcpu->target_freq > pcpu->policy->min) {
167                 expires += usecs_to_jiffies(tunables->timer_slack_val);
168                 pcpu->cpu_slack_timer.expires = expires;
169                 add_timer_on(&pcpu->cpu_slack_timer, cpu);
170         }
171
172         spin_lock_irqsave(&pcpu->load_lock, flags);
173         pcpu->time_in_idle =
174                 get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
175                                   tunables->io_is_busy);
176         pcpu->cputime_speedadj = 0;
177         pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
178         spin_unlock_irqrestore(&pcpu->load_lock, flags);
179 }
180
181 static unsigned int freq_to_above_hispeed_delay(
182         struct cpufreq_interactive_tunables *tunables,
183         unsigned int freq)
184 {
185         int i;
186         unsigned int ret;
187         unsigned long flags;
188
189         spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
190
191         for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
192                         freq >= tunables->above_hispeed_delay[i+1]; i += 2)
193                 ;
194
195         ret = tunables->above_hispeed_delay[i];
196         spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
197         return ret;
198 }
199
200 static unsigned int freq_to_targetload(
201         struct cpufreq_interactive_tunables *tunables, unsigned int freq)
202 {
203         int i;
204         unsigned int ret;
205         unsigned long flags;
206
207         spin_lock_irqsave(&tunables->target_loads_lock, flags);
208
209         for (i = 0; i < tunables->ntarget_loads - 1 &&
210                     freq >= tunables->target_loads[i+1]; i += 2)
211                 ;
212
213         ret = tunables->target_loads[i];
214         spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
215         return ret;
216 }
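
/*
 * Lookup walk-through (illustrative tuning): with target_loads set from
 * the string "85 1100000:90 1700000:97", i.e. { 85, 1100000, 90,
 * 1700000, 97 }, a query at 600000 kHz stops at i == 0 and returns 85,
 * a query at 1100000 kHz steps once to i == 2 and returns 90, and any
 * query at or above 1700000 kHz lands on i == 4 and returns 97.
 * freq_to_above_hispeed_delay() walks its table the same way.
 */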
217
218 /*
219  * If increasing frequencies never map to a lower target load then
220  * choose_freq() will find the minimum frequency that does not exceed its
221  * target load given the current load.
222  */
223 static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
224                 unsigned int loadadjfreq)
225 {
226         unsigned int freq = pcpu->policy->cur;
227         unsigned int prevfreq, freqmin, freqmax;
228         unsigned int tl;
229         int index;
230
231         freqmin = 0;
232         freqmax = UINT_MAX;
233
234         do {
235                 prevfreq = freq;
236                 tl = freq_to_targetload(pcpu->policy->governor_data, freq);
237
238                 /*
239                  * Find the lowest frequency where the computed load is less
240                  * than or equal to the target load.
241                  */
242
243                 if (cpufreq_frequency_table_target(
244                             pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
245                             CPUFREQ_RELATION_L, &index))
246                         break;
247                 freq = pcpu->freq_table[index].frequency;
248
249                 if (freq > prevfreq) {
250                         /* The previous frequency is too low. */
251                         freqmin = prevfreq;
252
253                         if (freq >= freqmax) {
254                                 /*
255                                  * Find the highest frequency that is less
256                                  * than freqmax.
257                                  */
258                                 if (cpufreq_frequency_table_target(
259                                             pcpu->policy, pcpu->freq_table,
260                                             freqmax - 1, CPUFREQ_RELATION_H,
261                                             &index))
262                                         break;
263                                 freq = pcpu->freq_table[index].frequency;
264
265                                 if (freq == freqmin) {
266                                         /*
267                                          * The first frequency below freqmax
268                                          * has already been found to be too
269                                          * low.  freqmax is the lowest speed
270                                          * we found that is fast enough.
271                                          */
272                                         freq = freqmax;
273                                         break;
274                                 }
275                         }
276                 } else if (freq < prevfreq) {
277                         /* The previous frequency is high enough. */
278                         freqmax = prevfreq;
279
280                         if (freq <= freqmin) {
281                                 /*
282                                  * Find the lowest frequency that is higher
283                                  * than freqmin.
284                                  */
285                                 if (cpufreq_frequency_table_target(
286                                             pcpu->policy, pcpu->freq_table,
287                                             freqmin + 1, CPUFREQ_RELATION_L,
288                                             &index))
289                                         break;
290                                 freq = pcpu->freq_table[index].frequency;
291
292                                 /*
293                                  * If freqmax is the first frequency above
294                                  * freqmin then we have already found that
295                                  * this speed is fast enough.
296                                  */
297                                 if (freq == freqmax)
298                                         break;
299                         }
300                 }
301
302                 /* If the same frequency was chosen as last time then we're done. */
303         } while (freq != prevfreq);
304
305         return freq;
306 }
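
/*
 * Convergence example (all numbers illustrative): assume a frequency
 * table of { 300000, 600000, 900000, 1200000 } kHz, a target load of 90
 * below 900000 kHz and 97 from 900000 kHz up, policy->cur == 600000,
 * and loadadjfreq == 57000000 (95% load at 600 MHz).  Pass 1: tl == 90,
 * 57000000 / 90 == 633333, RELATION_L selects 900000; since that is a
 * raise, freqmin becomes 600000.  Pass 2: tl == 97, 57000000 / 97 ==
 * 587628, RELATION_L selects 600000; since that is a drop, freqmax
 * becomes 900000, and because 600000 <= freqmin the loop probes the
 * lowest frequency above freqmin, finds 900000 == freqmax, and settles
 * there.  The freqmin/freqmax pincer is what keeps the search from
 * oscillating between 600000 and 900000 forever.
 */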
307
308 static u64 update_load(int cpu)
309 {
310         struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
311         struct cpufreq_interactive_tunables *tunables =
312                 pcpu->policy->governor_data;
313         u64 now;
314         u64 now_idle;
315         u64 delta_idle;
316         u64 delta_time;
317         u64 active_time;
318
319         now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
320         delta_idle = (now_idle - pcpu->time_in_idle);
321         delta_time = (now - pcpu->time_in_idle_timestamp);
322
323         if (delta_time <= delta_idle)
324                 active_time = 0;
325         else
326                 active_time = delta_time - delta_idle;
327
328         pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
329
330         pcpu->time_in_idle = now_idle;
331         pcpu->time_in_idle_timestamp = now;
332         return now;
333 }
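
/*
 * Accounting example (hypothetical sample): if 20 ms passed since the
 * previous sample (delta_time == 20000 us) of which 5 ms were idle,
 * then active_time == 15000 us and, at cur == 1000000 kHz, the sum
 * grows by 15000 * 1000000.  When the timer later divides the
 * accumulated cputime_speedadj by the total elapsed time it recovers
 * the average busy frequency over the window (750000 kHz here), which
 * times 100 and divided by target_freq yields the percent load.
 */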
334
335 static void cpufreq_interactive_timer(unsigned long data)
336 {
337         u64 now;
338         unsigned int delta_time;
339         u64 cputime_speedadj;
340         int cpu_load;
341         struct cpufreq_interactive_cpuinfo *pcpu =
342                 &per_cpu(cpuinfo, data);
343         struct cpufreq_interactive_tunables *tunables =
344                 pcpu->policy->governor_data;
345         unsigned int new_freq;
346         unsigned int loadadjfreq;
347         unsigned int index;
348         unsigned long flags;
349
350         if (!down_read_trylock(&pcpu->enable_sem))
351                 return;
352         if (!pcpu->governor_enabled)
353                 goto exit;
354
355         spin_lock_irqsave(&pcpu->load_lock, flags);
356         now = update_load(data);
357         delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
358         cputime_speedadj = pcpu->cputime_speedadj;
359         spin_unlock_irqrestore(&pcpu->load_lock, flags);
360
361         if (WARN_ON_ONCE(!delta_time))
362                 goto rearm;
363
364         spin_lock_irqsave(&pcpu->target_freq_lock, flags);
365         do_div(cputime_speedadj, delta_time);
366         loadadjfreq = (unsigned int)cputime_speedadj * 100;
367         cpu_load = loadadjfreq / pcpu->target_freq;
368         tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
369
370         if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
371                 if (pcpu->target_freq < tunables->hispeed_freq) {
372                         new_freq = tunables->hispeed_freq;
373                 } else {
374                         new_freq = choose_freq(pcpu, loadadjfreq);
375
376                         if (new_freq < tunables->hispeed_freq)
377                                 new_freq = tunables->hispeed_freq;
378                 }
379         } else {
380                 new_freq = choose_freq(pcpu, loadadjfreq);
381                 if (new_freq > tunables->hispeed_freq &&
382                                 pcpu->target_freq < tunables->hispeed_freq)
383                         new_freq = tunables->hispeed_freq;
384         }
385
386         if (pcpu->target_freq >= tunables->hispeed_freq &&
387             new_freq > pcpu->target_freq &&
388             now - pcpu->hispeed_validate_time <
389             freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
390                 trace_cpufreq_interactive_notyet(
391                         data, cpu_load, pcpu->target_freq,
392                         pcpu->policy->cur, new_freq);
393                 spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
394                 goto rearm;
395         }
396
397         pcpu->hispeed_validate_time = now;
398
399         if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
400                                            new_freq, CPUFREQ_RELATION_L,
401                                            &index)) {
402                 spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
403                 goto rearm;
404         }
405
406         new_freq = pcpu->freq_table[index].frequency;
407
408         /*
409          * Do not scale below floor_freq unless we have been at or above the
410          * floor frequency for the minimum sample time since last validated.
411          */
412         if (new_freq < pcpu->floor_freq) {
413                 if (now - pcpu->floor_validate_time <
414                                 tunables->min_sample_time) {
415                         trace_cpufreq_interactive_notyet(
416                                 data, cpu_load, pcpu->target_freq,
417                                 pcpu->policy->cur, new_freq);
418                         spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
419                         goto rearm;
420                 }
421         }
422
423         /*
424          * Update the timestamp for checking whether speed has been held at
425          * or above the selected frequency for a minimum of min_sample_time,
426          * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
427          * allow the speed to drop as soon as the boostpulse duration expires
428          * (or the indefinite boost is turned off).
429          */
430
431         if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
432                 pcpu->floor_freq = new_freq;
433                 pcpu->floor_validate_time = now;
434         }
435
436         if (pcpu->target_freq == new_freq &&
437                         pcpu->target_freq <= pcpu->policy->cur) {
438                 trace_cpufreq_interactive_already(
439                         data, cpu_load, pcpu->target_freq,
440                         pcpu->policy->cur, new_freq);
441                 spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
442                 goto rearm_if_notmax;
443         }
444
445         trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
446                                          pcpu->policy->cur, new_freq);
447
448         pcpu->target_freq = new_freq;
449         spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
450         spin_lock_irqsave(&speedchange_cpumask_lock, flags);
451         cpumask_set_cpu(data, &speedchange_cpumask);
452         spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
453         wake_up_process(speedchange_task);
454
455 rearm_if_notmax:
456         /*
457          * Already at max speed with no need to change that; wait
458          * until the next idle to re-evaluate, no timer needed.
459          */
460         if (pcpu->target_freq == pcpu->policy->max)
461                 goto exit;
462
463 rearm:
464         if (!timer_pending(&pcpu->cpu_timer))
465                 cpufreq_interactive_timer_resched(pcpu);
466
467 exit:
468         up_read(&pcpu->enable_sem);
469         return;
470 }
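
/*
 * Decision sketch for the sampling path above (illustrative values):
 * with go_hispeed_load == 99, hispeed_freq == 1200000 and a sample
 * showing cpu_load == 100 while targeting 600000, the first bump stops
 * at hispeed_freq rather than jumping straight to policy->max.  Only
 * once the target is already at or above 1200000 does choose_freq()
 * take over (still clamped to at least hispeed_freq while the load
 * stays that high), and a further raise must also wait out
 * freq_to_above_hispeed_delay() since hispeed_validate_time, while
 * min_sample_time gates any drop below floor_freq.
 */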
471
472 static void cpufreq_interactive_idle_start(void)
473 {
474         struct cpufreq_interactive_cpuinfo *pcpu =
475                 &per_cpu(cpuinfo, smp_processor_id());
476         int pending;
477
478         if (!down_read_trylock(&pcpu->enable_sem))
479                 return;
480         if (!pcpu->governor_enabled) {
481                 up_read(&pcpu->enable_sem);
482                 return;
483         }
484
485         pending = timer_pending(&pcpu->cpu_timer);
486
487         if (pcpu->target_freq != pcpu->policy->min) {
488                 /*
489                  * Entering idle while not at lowest speed.  On some
490                  * platforms this can hold the other CPU(s) at that speed
491                  * even though the CPU is idle. Set a timer to re-evaluate
492                  * speed so this idle CPU doesn't hold the other CPUs above
493                  * min indefinitely.  This should probably be a quirk of
494                  * the CPUFreq driver.
495                  */
496                 if (!pending)
497                         cpufreq_interactive_timer_resched(pcpu);
498         }
499
500         up_read(&pcpu->enable_sem);
501 }
502
503 static void cpufreq_interactive_idle_end(void)
504 {
505         struct cpufreq_interactive_cpuinfo *pcpu =
506                 &per_cpu(cpuinfo, smp_processor_id());
507
508         if (!down_read_trylock(&pcpu->enable_sem))
509                 return;
510         if (!pcpu->governor_enabled) {
511                 up_read(&pcpu->enable_sem);
512                 return;
513         }
514
515         /* Arm the timer for 1-2 ticks later if not already pending. */
516         if (!timer_pending(&pcpu->cpu_timer)) {
517                 cpufreq_interactive_timer_resched(pcpu);
518         } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
519                 del_timer(&pcpu->cpu_timer);
520                 del_timer(&pcpu->cpu_slack_timer);
521                 cpufreq_interactive_timer(smp_processor_id());
522         }
523
524         up_read(&pcpu->enable_sem);
525 }
526
527 static int cpufreq_interactive_speedchange_task(void *data)
528 {
529         unsigned int cpu;
530         cpumask_t tmp_mask;
531         unsigned long flags;
532         struct cpufreq_interactive_cpuinfo *pcpu;
533
534         while (1) {
535                 set_current_state(TASK_INTERRUPTIBLE);
536                 spin_lock_irqsave(&speedchange_cpumask_lock, flags);
537
538                 if (cpumask_empty(&speedchange_cpumask)) {
539                         spin_unlock_irqrestore(&speedchange_cpumask_lock,
540                                                flags);
541                         schedule();
542
543                         if (kthread_should_stop())
544                                 break;
545
546                         spin_lock_irqsave(&speedchange_cpumask_lock, flags);
547                 }
548
549                 set_current_state(TASK_RUNNING);
550                 tmp_mask = speedchange_cpumask;
551                 cpumask_clear(&speedchange_cpumask);
552                 spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
553
554                 for_each_cpu(cpu, &tmp_mask) {
555                         unsigned int j;
556                         unsigned int max_freq = 0;
557
558                         pcpu = &per_cpu(cpuinfo, cpu);
559                         if (!down_read_trylock(&pcpu->enable_sem))
560                                 continue;
561                         if (!pcpu->governor_enabled) {
562                                 up_read(&pcpu->enable_sem);
563                                 continue;
564                         }
565
566                         for_each_cpu(j, pcpu->policy->cpus) {
567                                 struct cpufreq_interactive_cpuinfo *pjcpu =
568                                         &per_cpu(cpuinfo, j);
569
570                                 if (pjcpu->target_freq > max_freq)
571                                         max_freq = pjcpu->target_freq;
572                         }
573
574                         if (max_freq != pcpu->policy->cur)
575                                 __cpufreq_driver_target(pcpu->policy,
576                                                         max_freq,
577                                                         CPUFREQ_RELATION_H);
578                         trace_cpufreq_interactive_setspeed(cpu,
579                                                      pcpu->target_freq,
580                                                      pcpu->policy->cur);
581
582                         up_read(&pcpu->enable_sem);
583                 }
584         }
585
586         return 0;
587 }
588
589 static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
590 {
591         int i;
592         int anyboost = 0;
593         unsigned long flags[2];
594         struct cpufreq_interactive_cpuinfo *pcpu;
595
596         tunables->boosted = true;
597
598         spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
599
600         for_each_online_cpu(i) {
601                 pcpu = &per_cpu(cpuinfo, i);
602                 if (tunables != pcpu->policy->governor_data)
603                         continue;
604
605                 spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
606                 if (pcpu->target_freq < tunables->hispeed_freq) {
607                         pcpu->target_freq = tunables->hispeed_freq;
608                         cpumask_set_cpu(i, &speedchange_cpumask);
609                         pcpu->hispeed_validate_time =
610                                 ktime_to_us(ktime_get());
611                         anyboost = 1;
612                 }
613
614                 /*
615                  * Set floor freq and (re)start timer for when last
616                  * validated.
617                  */
618
619                 pcpu->floor_freq = tunables->hispeed_freq;
620                 pcpu->floor_validate_time = ktime_to_us(ktime_get());
621                 spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
622         }
623
624         spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
625
626         if (anyboost)
627                 wake_up_process(speedchange_task);
628 }
629
630 static int cpufreq_interactive_notifier(
631         struct notifier_block *nb, unsigned long val, void *data)
632 {
633         struct cpufreq_freqs *freq = data;
634         struct cpufreq_interactive_cpuinfo *pcpu;
635         int cpu;
636         unsigned long flags;
637
638         if (val == CPUFREQ_POSTCHANGE) {
639                 pcpu = &per_cpu(cpuinfo, freq->cpu);
640                 if (!down_read_trylock(&pcpu->enable_sem))
641                         return 0;
642                 if (!pcpu->governor_enabled) {
643                         up_read(&pcpu->enable_sem);
644                         return 0;
645                 }
646
647                 for_each_cpu(cpu, pcpu->policy->cpus) {
648                         struct cpufreq_interactive_cpuinfo *pjcpu =
649                                 &per_cpu(cpuinfo, cpu);
650                         if (cpu != freq->cpu) {
651                                 if (!down_read_trylock(&pjcpu->enable_sem))
652                                         continue;
653                                 if (!pjcpu->governor_enabled) {
654                                         up_read(&pjcpu->enable_sem);
655                                         continue;
656                                 }
657                         }
658                         spin_lock_irqsave(&pjcpu->load_lock, flags);
659                         update_load(cpu);
660                         spin_unlock_irqrestore(&pjcpu->load_lock, flags);
661                         if (cpu != freq->cpu)
662                                 up_read(&pjcpu->enable_sem);
663                 }
664
665                 up_read(&pcpu->enable_sem);
666         }
667         return 0;
668 }
669
670 static struct notifier_block cpufreq_notifier_block = {
671         .notifier_call = cpufreq_interactive_notifier,
672 };
673
674 static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
675 {
676         const char *cp;
677         int i;
678         int ntokens = 1;
679         unsigned int *tokenized_data;
680         int err = -EINVAL;
681
682         cp = buf;
683         while ((cp = strpbrk(cp + 1, " :")))
684                 ntokens++;
685
686         if (!(ntokens & 0x1))
687                 goto err;
688
689         tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
690         if (!tokenized_data) {
691                 err = -ENOMEM;
692                 goto err;
693         }
694
695         cp = buf;
696         i = 0;
697         while (i < ntokens) {
698                 if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
699                         goto err_kfree;
700
701                 cp = strpbrk(cp, " :");
702                 if (!cp)
703                         break;
704                 cp++;
705         }
706
707         if (i != ntokens)
708                 goto err_kfree;
709
710         *num_tokens = ntokens;
711         return tokenized_data;
712
713 err_kfree:
714         kfree(tokenized_data);
715 err:
716         return ERR_PTR(err);
717 }
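
/*
 * Parsing example (illustrative input): get_tokenized_data("85 1100000:90",
 * &n) accepts ' ' and ':' interchangeably as separators and returns
 * { 85, 1100000, 90 } with n == 3.  An even token count, such as a
 * trailing frequency with no value after it, fails the (ntokens & 0x1)
 * check and yields ERR_PTR(-EINVAL), preserving the value/freq pairing
 * that the lookup loops rely on.
 */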
718
719 static ssize_t show_target_loads(
720         struct cpufreq_interactive_tunables *tunables,
721         char *buf)
722 {
723         int i;
724         ssize_t ret = 0;
725         unsigned long flags;
726
727         spin_lock_irqsave(&tunables->target_loads_lock, flags);
728
729         for (i = 0; i < tunables->ntarget_loads; i++)
730                 ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
731                                i & 0x1 ? ":" : " ");
732
733         sprintf(buf + ret - 1, "\n");
734         spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
735         return ret;
736 }
737
738 static ssize_t store_target_loads(
739         struct cpufreq_interactive_tunables *tunables,
740         const char *buf, size_t count)
741 {
742         int ntokens;
743         unsigned int *new_target_loads = NULL;
744         unsigned long flags;
745
746         new_target_loads = get_tokenized_data(buf, &ntokens);
747         if (IS_ERR(new_target_loads))
748                 return PTR_RET(new_target_loads);
749
750         spin_lock_irqsave(&tunables->target_loads_lock, flags);
751         if (tunables->target_loads != default_target_loads)
752                 kfree(tunables->target_loads);
753         tunables->target_loads = new_target_loads;
754         tunables->ntarget_loads = ntokens;
755         spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
756         return count;
757 }
758
759 static ssize_t show_above_hispeed_delay(
760         struct cpufreq_interactive_tunables *tunables, char *buf)
761 {
762         int i;
763         ssize_t ret = 0;
764         unsigned long flags;
765
766         spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
767
768         for (i = 0; i < tunables->nabove_hispeed_delay; i++)
769                 ret += sprintf(buf + ret, "%u%s",
770                                tunables->above_hispeed_delay[i],
771                                i & 0x1 ? ":" : " ");
772
773         sprintf(buf + ret - 1, "\n");
774         spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
775         return ret;
776 }
777
778 static ssize_t store_above_hispeed_delay(
779         struct cpufreq_interactive_tunables *tunables,
780         const char *buf, size_t count)
781 {
782         int ntokens;
783         unsigned int *new_above_hispeed_delay = NULL;
784         unsigned long flags;
785
786         new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
787         if (IS_ERR(new_above_hispeed_delay))
788                 return PTR_RET(new_above_hispeed_delay);
789
790         spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
791         if (tunables->above_hispeed_delay != default_above_hispeed_delay)
792                 kfree(tunables->above_hispeed_delay);
793         tunables->above_hispeed_delay = new_above_hispeed_delay;
794         tunables->nabove_hispeed_delay = ntokens;
795         spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
796         return count;
797
798 }
799
800 static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
801                 char *buf)
802 {
803         return sprintf(buf, "%u\n", tunables->hispeed_freq);
804 }
805
806 static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
807                 const char *buf, size_t count)
808 {
809         int ret;
810         unsigned long val;
811
812         ret = kstrtoul(buf, 0, &val);
813         if (ret < 0)
814                 return ret;
815         tunables->hispeed_freq = val;
816         return count;
817 }
818
819 static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
820                 *tunables, char *buf)
821 {
822         return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
823 }
824
825 static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
826                 *tunables, const char *buf, size_t count)
827 {
828         int ret;
829         unsigned long val;
830
831         ret = kstrtoul(buf, 0, &val);
832         if (ret < 0)
833                 return ret;
834         tunables->go_hispeed_load = val;
835         return count;
836 }
837
838 static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
839                 *tunables, char *buf)
840 {
841         return sprintf(buf, "%lu\n", tunables->min_sample_time);
842 }
843
844 static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
845                 *tunables, const char *buf, size_t count)
846 {
847         int ret;
848         unsigned long val;
849
850         ret = kstrtoul(buf, 0, &val);
851         if (ret < 0)
852                 return ret;
853         tunables->min_sample_time = val;
854         return count;
855 }
856
857 static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
858                 char *buf)
859 {
860         return sprintf(buf, "%lu\n", tunables->timer_rate);
861 }
862
863 static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
864                 const char *buf, size_t count)
865 {
866         int ret;
867         unsigned long val;
868
869         ret = kstrtoul(buf, 0, &val);
870         if (ret < 0)
871                 return ret;
872         tunables->timer_rate = val;
873         return count;
874 }
875
876 static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
877                 char *buf)
878 {
879         return sprintf(buf, "%d\n", tunables->timer_slack_val);
880 }
881
882 static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
883                 const char *buf, size_t count)
884 {
885         int ret;
886         long val;
887
888         ret = kstrtol(buf, 10, &val);
889         if (ret < 0)
890                 return ret;
891
892         tunables->timer_slack_val = val;
893         return count;
894 }
895
896 static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
897                           char *buf)
898 {
899         return sprintf(buf, "%d\n", tunables->boost_val);
900 }
901
902 static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
903                            const char *buf, size_t count)
904 {
905         int ret;
906         unsigned long val;
907
908         ret = kstrtoul(buf, 0, &val);
909         if (ret < 0)
910                 return ret;
911
912         tunables->boost_val = val;
913
914         if (tunables->boost_val) {
915                 trace_cpufreq_interactive_boost("on");
916                 if (!tunables->boosted)
917                         cpufreq_interactive_boost(tunables);
918         } else {
919                 tunables->boostpulse_endtime = ktime_to_us(ktime_get());
920                 trace_cpufreq_interactive_unboost("off");
921         }
922
923         return count;
924 }
925
926 static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
927                                 const char *buf, size_t count)
928 {
929         int ret;
930         unsigned long val;
931
932         ret = kstrtoul(buf, 0, &val);
933         if (ret < 0)
934                 return ret;
935
936         tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
937                 tunables->boostpulse_duration_val;
938         trace_cpufreq_interactive_boost("pulse");
939         if (!tunables->boosted)
940                 cpufreq_interactive_boost(tunables);
941         return count;
942 }
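
/*
 * Usage sketch (path shown for the system-wide instance, assuming the
 * standard sysfs mount point):
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * stamps boostpulse_endtime = now + boostpulse_duration_val and, via
 * cpufreq_interactive_boost(), immediately raises every CPU governed by
 * these tunables to at least hispeed_freq; once the pulse window
 * expires, normal load evaluation is free to ramp back down.
 */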
943
944 static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
945                 *tunables, char *buf)
946 {
947         return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
948 }
949
950 static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
951                 *tunables, const char *buf, size_t count)
952 {
953         int ret;
954         unsigned long val;
955
956         ret = kstrtoul(buf, 0, &val);
957         if (ret < 0)
958                 return ret;
959
960         tunables->boostpulse_duration_val = val;
961         return count;
962 }
963
964 static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
965                 char *buf)
966 {
967         return sprintf(buf, "%u\n", tunables->io_is_busy);
968 }
969
970 static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
971                 const char *buf, size_t count)
972 {
973         int ret;
974         unsigned long val;
975
976         ret = kstrtoul(buf, 0, &val);
977         if (ret < 0)
978                 return ret;
979         tunables->io_is_busy = val;
980         return count;
981 }
982
983 /*
984  * Create show/store routines
985  * - sys: One governor instance for complete SYSTEM
986  * - pol: One governor instance per struct cpufreq_policy
987  */
988 #define show_gov_pol_sys(file_name)                                     \
989 static ssize_t show_##file_name##_gov_sys                               \
990 (struct kobject *kobj, struct attribute *attr, char *buf)               \
991 {                                                                       \
992         return show_##file_name(common_tunables, buf);                  \
993 }                                                                       \
994                                                                         \
995 static ssize_t show_##file_name##_gov_pol                               \
996 (struct cpufreq_policy *policy, char *buf)                              \
997 {                                                                       \
998         return show_##file_name(policy->governor_data, buf);            \
999 }
1000
1001 #define store_gov_pol_sys(file_name)                                    \
1002 static ssize_t store_##file_name##_gov_sys                              \
1003 (struct kobject *kobj, struct attribute *attr, const char *buf,         \
1004         size_t count)                                                   \
1005 {                                                                       \
1006         return store_##file_name(common_tunables, buf, count);          \
1007 }                                                                       \
1008                                                                         \
1009 static ssize_t store_##file_name##_gov_pol                              \
1010 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
1011 {                                                                       \
1012         return store_##file_name(policy->governor_data, buf, count);    \
1013 }
1014
1015 #define show_store_gov_pol_sys(file_name)                               \
1016 show_gov_pol_sys(file_name);                                            \
1017 store_gov_pol_sys(file_name)
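
/*
 * For target_loads, for instance, show_store_gov_pol_sys() expands to
 * four wrappers: show/store_target_loads_gov_sys(), which operate on
 * the shared common_tunables, and show/store_target_loads_gov_pol(),
 * which fetch the per-policy tunables from policy->governor_data.
 */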
1018
1019 show_store_gov_pol_sys(target_loads);
1020 show_store_gov_pol_sys(above_hispeed_delay);
1021 show_store_gov_pol_sys(hispeed_freq);
1022 show_store_gov_pol_sys(go_hispeed_load);
1023 show_store_gov_pol_sys(min_sample_time);
1024 show_store_gov_pol_sys(timer_rate);
1025 show_store_gov_pol_sys(timer_slack);
1026 show_store_gov_pol_sys(boost);
1027 store_gov_pol_sys(boostpulse);
1028 show_store_gov_pol_sys(boostpulse_duration);
1029 show_store_gov_pol_sys(io_is_busy);
1030
1031 #define gov_sys_attr_rw(_name)                                          \
1032 static struct global_attr _name##_gov_sys =                             \
1033 __ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
1034
1035 #define gov_pol_attr_rw(_name)                                          \
1036 static struct freq_attr _name##_gov_pol =                               \
1037 __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
1038
1039 #define gov_sys_pol_attr_rw(_name)                                      \
1040         gov_sys_attr_rw(_name);                                         \
1041         gov_pol_attr_rw(_name)
1042
1043 gov_sys_pol_attr_rw(target_loads);
1044 gov_sys_pol_attr_rw(above_hispeed_delay);
1045 gov_sys_pol_attr_rw(hispeed_freq);
1046 gov_sys_pol_attr_rw(go_hispeed_load);
1047 gov_sys_pol_attr_rw(min_sample_time);
1048 gov_sys_pol_attr_rw(timer_rate);
1049 gov_sys_pol_attr_rw(timer_slack);
1050 gov_sys_pol_attr_rw(boost);
1051 gov_sys_pol_attr_rw(boostpulse_duration);
1052 gov_sys_pol_attr_rw(io_is_busy);
1053
1054 static struct global_attr boostpulse_gov_sys =
1055         __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
1056
1057 static struct freq_attr boostpulse_gov_pol =
1058         __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
1059
1060 /* One Governor instance for entire system */
1061 static struct attribute *interactive_attributes_gov_sys[] = {
1062         &target_loads_gov_sys.attr,
1063         &above_hispeed_delay_gov_sys.attr,
1064         &hispeed_freq_gov_sys.attr,
1065         &go_hispeed_load_gov_sys.attr,
1066         &min_sample_time_gov_sys.attr,
1067         &timer_rate_gov_sys.attr,
1068         &timer_slack_gov_sys.attr,
1069         &boost_gov_sys.attr,
1070         &boostpulse_gov_sys.attr,
1071         &boostpulse_duration_gov_sys.attr,
1072         &io_is_busy_gov_sys.attr,
1073         NULL,
1074 };
1075
1076 static struct attribute_group interactive_attr_group_gov_sys = {
1077         .attrs = interactive_attributes_gov_sys,
1078         .name = "interactive",
1079 };
1080
1081 /* Per policy governor instance */
1082 static struct attribute *interactive_attributes_gov_pol[] = {
1083         &target_loads_gov_pol.attr,
1084         &above_hispeed_delay_gov_pol.attr,
1085         &hispeed_freq_gov_pol.attr,
1086         &go_hispeed_load_gov_pol.attr,
1087         &min_sample_time_gov_pol.attr,
1088         &timer_rate_gov_pol.attr,
1089         &timer_slack_gov_pol.attr,
1090         &boost_gov_pol.attr,
1091         &boostpulse_gov_pol.attr,
1092         &boostpulse_duration_gov_pol.attr,
1093         &io_is_busy_gov_pol.attr,
1094         NULL,
1095 };
1096
1097 static struct attribute_group interactive_attr_group_gov_pol = {
1098         .attrs = interactive_attributes_gov_pol,
1099         .name = "interactive",
1100 };
1101
1102 static struct attribute_group *get_sysfs_attr(void)
1103 {
1104         if (have_governor_per_policy())
1105                 return &interactive_attr_group_gov_pol;
1106         else
1107                 return &interactive_attr_group_gov_sys;
1108 }
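
/*
 * Resulting sysfs layout (paths illustrative, assuming the standard
 * mount point): with a single system-wide instance the attributes live
 * under /sys/devices/system/cpu/cpufreq/interactive/, whereas with
 * have_governor_per_policy() each policy gets its own copy, e.g.
 * /sys/devices/system/cpu/cpu0/cpufreq/interactive/.
 */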
1109
1110 static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
1111                                              unsigned long val,
1112                                              void *data)
1113 {
1114         switch (val) {
1115         case IDLE_START:
1116                 cpufreq_interactive_idle_start();
1117                 break;
1118         case IDLE_END:
1119                 cpufreq_interactive_idle_end();
1120                 break;
1121         }
1122
1123         return 0;
1124 }
1125
1126 static struct notifier_block cpufreq_interactive_idle_nb = {
1127         .notifier_call = cpufreq_interactive_idle_notifier,
1128 };
1129
1130 static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
1131                 unsigned int event)
1132 {
1133         int rc;
1134         unsigned int j;
1135         struct cpufreq_interactive_cpuinfo *pcpu;
1136         struct cpufreq_frequency_table *freq_table;
1137         struct cpufreq_interactive_tunables *tunables;
1138         unsigned long flags;
1139
1140         if (have_governor_per_policy())
1141                 tunables = policy->governor_data;
1142         else
1143                 tunables = common_tunables;
1144
1145         WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
1146
1147         switch (event) {
1148         case CPUFREQ_GOV_POLICY_INIT:
1149                 if (have_governor_per_policy()) {
1150                         WARN_ON(tunables);
1151                 } else if (tunables) {
1152                         tunables->usage_count++;
1153                         policy->governor_data = tunables;
1154                         return 0;
1155                 }
1156
1157                 tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
1158                 if (!tunables) {
1159                         pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
1160                         return -ENOMEM;
1161                 }
1162
1163                 tunables->usage_count = 1;
1164                 tunables->above_hispeed_delay = default_above_hispeed_delay;
1165                 tunables->nabove_hispeed_delay =
1166                         ARRAY_SIZE(default_above_hispeed_delay);
1167                 tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
1168                 tunables->target_loads = default_target_loads;
1169                 tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
1170                 tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
1171                 tunables->timer_rate = DEFAULT_TIMER_RATE;
1172                 tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
1173                 tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
1174
1175                 spin_lock_init(&tunables->target_loads_lock);
1176                 spin_lock_init(&tunables->above_hispeed_delay_lock);
1177
1178                 policy->governor_data = tunables;
1179                 if (!have_governor_per_policy())
1180                         common_tunables = tunables;
1181
1182                 rc = sysfs_create_group(get_governor_parent_kobj(policy),
1183                                 get_sysfs_attr());
1184                 if (rc) {
1185                         kfree(tunables);
1186                         policy->governor_data = NULL;
1187                         if (!have_governor_per_policy())
1188                                 common_tunables = NULL;
1189                         return rc;
1190                 }
1191
1192                 if (!policy->governor->initialized) {
1193                         idle_notifier_register(&cpufreq_interactive_idle_nb);
1194                         cpufreq_register_notifier(&cpufreq_notifier_block,
1195                                         CPUFREQ_TRANSITION_NOTIFIER);
1196                 }
1197
1198                 break;
1199
1200         case CPUFREQ_GOV_POLICY_EXIT:
1201                 if (!--tunables->usage_count) {
1202                         if (policy->governor->initialized == 1) {
1203                                 cpufreq_unregister_notifier(&cpufreq_notifier_block,
1204                                                 CPUFREQ_TRANSITION_NOTIFIER);
1205                                 idle_notifier_unregister(&cpufreq_interactive_idle_nb);
1206                         }
1207
1208                         sysfs_remove_group(get_governor_parent_kobj(policy),
1209                                         get_sysfs_attr());
1210                         kfree(tunables);
1211                         common_tunables = NULL;
1212                 }
1213
1214                 policy->governor_data = NULL;
1215                 break;
1216
1217         case CPUFREQ_GOV_START:
1218                 mutex_lock(&gov_lock);
1219
1220                 freq_table = cpufreq_frequency_get_table(policy->cpu);
1221                 if (!tunables->hispeed_freq)
1222                         tunables->hispeed_freq = policy->max;
1223
1224                 for_each_cpu(j, policy->cpus) {
1225                         pcpu = &per_cpu(cpuinfo, j);
1226                         pcpu->policy = policy;
1227                         pcpu->target_freq = policy->cur;
1228                         pcpu->freq_table = freq_table;
1229                         pcpu->floor_freq = pcpu->target_freq;
1230                         pcpu->floor_validate_time =
1231                                 ktime_to_us(ktime_get());
1232                         pcpu->hispeed_validate_time =
1233                                 pcpu->floor_validate_time;
1234                         pcpu->max_freq = policy->max;
1235                         down_write(&pcpu->enable_sem);
1236                         del_timer_sync(&pcpu->cpu_timer);
1237                         del_timer_sync(&pcpu->cpu_slack_timer);
1238                         cpufreq_interactive_timer_start(tunables, j);
1239                         pcpu->governor_enabled = 1;
1240                         up_write(&pcpu->enable_sem);
1241                 }
1242
1243                 mutex_unlock(&gov_lock);
1244                 break;
1245
1246         case CPUFREQ_GOV_STOP:
1247                 mutex_lock(&gov_lock);
1248                 for_each_cpu(j, policy->cpus) {
1249                         pcpu = &per_cpu(cpuinfo, j);
1250                         down_write(&pcpu->enable_sem);
1251                         pcpu->governor_enabled = 0;
1252                         del_timer_sync(&pcpu->cpu_timer);
1253                         del_timer_sync(&pcpu->cpu_slack_timer);
1254                         up_write(&pcpu->enable_sem);
1255                 }
1256
1257                 mutex_unlock(&gov_lock);
1258                 break;
1259
1260         case CPUFREQ_GOV_LIMITS:
1261                 if (policy->max < policy->cur)
1262                         __cpufreq_driver_target(policy,
1263                                         policy->max, CPUFREQ_RELATION_H);
1264                 else if (policy->min > policy->cur)
1265                         __cpufreq_driver_target(policy,
1266                                         policy->min, CPUFREQ_RELATION_L);
1267                 for_each_cpu(j, policy->cpus) {
1268                         pcpu = &per_cpu(cpuinfo, j);
1269
1270                         down_read(&pcpu->enable_sem);
1271                         if (pcpu->governor_enabled == 0) {
1272                                 up_read(&pcpu->enable_sem);
1273                                 continue;
1274                         }
1275
1276                         spin_lock_irqsave(&pcpu->target_freq_lock, flags);
1277                         if (policy->max < pcpu->target_freq)
1278                                 pcpu->target_freq = policy->max;
1279                         else if (policy->min > pcpu->target_freq)
1280                                 pcpu->target_freq = policy->min;
1281
1282                         spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
1283                         up_read(&pcpu->enable_sem);
1284
1285                         /* Reschedule the timer only if policy->max is raised.
1286                          * Delete the timers first, else the timer callback may
1287                          * return without re-arming the timer when it fails to
1288                          * acquire the semaphore.  This race may cause the timer
1289                          * to stop unexpectedly.
1290                          */
1291
1292                         if (policy->max > pcpu->max_freq) {
1293                                 down_write(&pcpu->enable_sem);
1294                                 del_timer_sync(&pcpu->cpu_timer);
1295                                 del_timer_sync(&pcpu->cpu_slack_timer);
1296                                 cpufreq_interactive_timer_start(tunables, j);
1297                                 up_write(&pcpu->enable_sem);
1298                         }
1299
1300                         pcpu->max_freq = policy->max;
1301                 }
1302                 break;
1303         }
1304         return 0;
1305 }
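
/*
 * Lifecycle recap (as wired above): POLICY_INIT allocates or ref-counts
 * the tunables and creates the sysfs group; GOV_START snapshots the
 * policy into each per-CPU state and arms the sampling timers under
 * enable_sem; GOV_STOP tears the timers down the same way; GOV_LIMITS
 * clamps each target_freq into the new [min, max] and restarts the
 * timers only when policy->max was raised.
 */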
1306
1307 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
1308 static
1309 #endif
1310 struct cpufreq_governor cpufreq_gov_interactive = {
1311         .name = "interactive",
1312         .governor = cpufreq_governor_interactive,
1313         .max_transition_latency = 10000000,
1314         .owner = THIS_MODULE,
1315 };
1316
1317 static void cpufreq_interactive_nop_timer(unsigned long data)
1318 {
1319 }
1320
1321 static int __init cpufreq_interactive_init(void)
1322 {
1323         unsigned int i;
1324         struct cpufreq_interactive_cpuinfo *pcpu;
1325         struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1326
1327         /* Initialize per-CPU timers */
1328         for_each_possible_cpu(i) {
1329                 pcpu = &per_cpu(cpuinfo, i);
1330                 init_timer_deferrable(&pcpu->cpu_timer);
1331                 pcpu->cpu_timer.function = cpufreq_interactive_timer;
1332                 pcpu->cpu_timer.data = i;
1333                 init_timer(&pcpu->cpu_slack_timer);
1334                 pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
1335                 spin_lock_init(&pcpu->load_lock);
1336                 spin_lock_init(&pcpu->target_freq_lock);
1337                 init_rwsem(&pcpu->enable_sem);
1338         }
1339
1340         spin_lock_init(&speedchange_cpumask_lock);
1341         mutex_init(&gov_lock);
1342         speedchange_task =
1343                 kthread_create(cpufreq_interactive_speedchange_task, NULL,
1344                                "cfinteractive");
1345         if (IS_ERR(speedchange_task))
1346                 return PTR_ERR(speedchange_task);
1347
1348         sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
1349         get_task_struct(speedchange_task);
1350
1351         /* NB: wake up so the thread does not look hung to the freezer */
1352         wake_up_process(speedchange_task);
1353
1354         return cpufreq_register_governor(&cpufreq_gov_interactive);
1355 }
1356
1357 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
1358 fs_initcall(cpufreq_interactive_init);
1359 #else
1360 module_init(cpufreq_interactive_init);
1361 #endif
1362
1363 static void __exit cpufreq_interactive_exit(void)
1364 {
1365         cpufreq_unregister_governor(&cpufreq_gov_interactive);
1366         kthread_stop(speedchange_task);
1367         put_task_struct(speedchange_task);
1368 }
1369
1370 module_exit(cpufreq_interactive_exit);
1371
1372 MODULE_AUTHOR("Mike Chan <mike@android.com>");
1373 MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
1374         "latency-sensitive workloads");
1375 MODULE_LICENSE("GPL");