cpufreq: interactive: hold reference on global cpufreq kobject if needed
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when the load bursts (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can ramp
         * down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /*
         * The sample rate of the timer used to increase frequency
         */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have a single governor instance for the whole system */
struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

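/*
 * Note on the tunable tables walked below: both above_hispeed_delay and
 * target_loads are stored as flattened arrays of
 * "value[:anchor-frequency value]..." pairs; even indices hold values, odd
 * indices hold the frequency at which the next value takes effect.  The
 * loops step two entries at a time and stop at the last value whose anchor
 * frequency does not exceed the requested frequency.  For example
 * (illustrative numbers only), a target_loads string of
 * "85 1100000:90 1300000:99" maps freq < 1100000 to load 85,
 * 1100000 <= freq < 1300000 to 90, and freq >= 1300000 to 99.
 */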
static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                    freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
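/*
 * Worked example (illustrative frequencies): with a target load of 90 and
 * loadadjfreq == 54000000 (60% busy at 900000 kHz), the first pass asks for
 * the lowest table frequency >= 54000000 / 90 = 600000 kHz.  Each iteration
 * re-reads the target load at the candidate frequency and narrows the
 * [freqmin, freqmax] window until the same frequency is chosen twice in a
 * row.
 */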
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

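/*
 * update_load() accumulates active_time * cur_freq into cputime_speedadj,
 * so the field is a running integral of "busy cycles" in freq-usec units.
 * delta_idle and delta_time derive from get_cpu_idle_time(), which honors
 * io_is_busy (iowait counted as busy time when the tunable is set).
 */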
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

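/*
 * Per-sample evaluation: dividing cputime_speedadj by the elapsed time
 * yields the average of busy_fraction * cur_freq; multiplying by 100 and
 * dividing by target_freq gives cpu_load as a percentage of the current
 * target.  E.g. (illustrative) 50% busy at 1000000 kHz with target_freq
 * 1000000 gives cpu_load = 50, while the same busyness against a
 * target_freq of 500000 gives cpu_load = 100.
 */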
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

        if (cpu_load >= tunables->go_hispeed_load || boosted) {
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= tunables->hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index))
                goto rearm;

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time <
                                tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already at max speed with no need to change; wait until the next
         * idle to re-evaluate.  No timer needed.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

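/*
 * The speedchange task services every CPU flagged in speedchange_cpumask:
 * for each one it takes the highest target_freq across the CPUs sharing
 * that policy and issues a single __cpufreq_driver_target() call, so a
 * busy sibling keeps a shared-clock cluster at its required speed.
 */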
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;

                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

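/*
 * Parses a space/colon separated list of unsigned ints, as written to the
 * target_loads and above_hispeed_delay sysfs files.  A valid string has an
 * odd number of tokens ("value" or "value:freq value..."), matching the
 * flattened pair layout described above freq_to_above_hispeed_delay().
 */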
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->timer_rate = val;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                          char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

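/*
 * Any write to the boostpulse file triggers a pulse; the written value is
 * validated but otherwise ignored.  Typical usage (the path below assumes
 * the common, non-per-policy sysfs layout):
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */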
static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)                                     \
static ssize_t show_##file_name##_gov_sys                               \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return show_##file_name(common_tunables, buf);                  \
}                                                                       \
                                                                        \
static ssize_t show_##file_name##_gov_pol                               \
(struct cpufreq_policy *policy, char *buf)                              \
{                                                                       \
        return show_##file_name(policy->governor_data, buf);            \
}

#define store_gov_pol_sys(file_name)                                    \
static ssize_t store_##file_name##_gov_sys                              \
(struct kobject *kobj, struct attribute *attr, const char *buf,         \
        size_t count)                                                   \
{                                                                       \
        return store_##file_name(common_tunables, buf, count);          \
}                                                                       \
                                                                        \
static ssize_t store_##file_name##_gov_pol                              \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        return store_##file_name(policy->governor_data, buf, count);    \
}

#define show_store_gov_pol_sys(file_name)                               \
show_gov_pol_sys(file_name);                                            \
store_gov_pol_sys(file_name)

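/*
 * For example, show_store_gov_pol_sys(timer_rate) expands to four
 * functions: show_timer_rate_gov_sys()/store_timer_rate_gov_sys()
 * forwarding to common_tunables, and show_timer_rate_gov_pol()/
 * store_timer_rate_gov_pol() forwarding to policy->governor_data.
 */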
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)                                          \
static struct global_attr _name##_gov_sys =                             \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)                                          \
static struct freq_attr _name##_gov_pol =                               \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)                                      \
        gov_sys_attr_rw(_name);                                         \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

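/*
 * Governor event flow: POLICY_INIT allocates (or reuses) the tunables and
 * creates the sysfs group, GOV_START arms the per-CPU timers and marks the
 * governor enabled, GOV_LIMITS clamps target_freq to the new policy bounds
 * and restarts the timers, GOV_STOP disarms them, and POLICY_EXIT drops
 * the tunables once the last user is gone.
 */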
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                policy->governor_data = tunables;
                if (!have_governor_per_policy()) {
                        common_tunables = tunables;
                        WARN_ON(cpufreq_get_global_kobject());
                }

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        policy->governor_data = NULL;
                        if (!have_governor_per_policy())
                                common_tunables = NULL;
                        return rc;
                }

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());

                        if (!have_governor_per_policy())
                                cpufreq_put_global_kobject();

                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        /* hold write semaphore to avoid race */
                        down_write(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_write(&pcpu->enable_sem);
                                continue;
                        }

                        /* update target_freq first */
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        /*
                         * Reschedule the timer.  Delete the timers first;
                         * otherwise the timer callback may return without
                         * re-arming the timer when it fails to acquire the
                         * semaphore, leaving the timer stopped unexpectedly.
                         */
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        up_write(&pcpu->enable_sem);
                }
                break;
        }
        return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");