/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#ifdef CONFIG_ARCH_ROCKCHIP
#include <linux/input.h>
#endif
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "cpufreq_governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int max_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
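/*
 * Both target_loads and above_hispeed_delay are exposed through sysfs as a
 * flat "value [freq:value ...]" string parsed by get_tokenized_data() below;
 * e.g. writing "85 1000000:90 1700000:99" to target_loads targets 85% load
 * below 1 GHz, 90% from 1 GHz up, and 99% from 1.7 GHz up (illustrative
 * values, not defaults of this file).
 */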

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load bursts (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
#ifdef CONFIG_ARCH_ROCKCHIP
	/* Frequency to which a touch boost takes the cpus */
	unsigned long touchboost_freq;
	/* Duration of a touchboost pulse in usecs */
	int touchboostpulse_duration_val;
	/* End time of touchboost pulse in ktime converted to usecs */
	u64 touchboostpulse_endtime;
#endif
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall take the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
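/*
 * Note on storage: the tokenized tables are flat arrays of the form
 * { value0, freq1, value1, freq2, value2, ... }, with values at even
 * indices and ascending frequency thresholds at odd indices.  The
 * step-by-two scans above return the value belonging to the highest
 * threshold at or below freq (value0 has an implicit threshold of 0).
 */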

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
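/*
 * Worked example (illustrative numbers): at policy->cur = 1000000 kHz and
 * 60% measured load, loadadjfreq = 60 * 1000000.  With a flat 90% target
 * load the loop asks the frequency table for the lowest entry at or above
 * 60000000 / 90 ~= 666667 kHz and converges there; per-frequency target
 * loads simply make the same search re-evaluate tl at each candidate.
 */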

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	u64 delta_idle;
	u64 delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - pcpu->time_in_idle);
	delta_time = (now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
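/*
 * cputime_speedadj accumulates active_time * cur_freq across samples, so
 * dividing it by the elapsed wall time (as the timer below does) yields a
 * frequency-weighted load: busy_fraction * frequency.  Multiplied by 100,
 * this is the "loadadjfreq" fed to choose_freq().
 */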

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	tunables->boosted = tunables->boost_val ||
		now < tunables->boostpulse_endtime;

#ifdef CONFIG_ARCH_ROCKCHIP
	pcpu->target_freq = pcpu->policy->cur;
	tunables->boosted |= now < tunables->touchboostpulse_endtime;
#endif

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
#ifdef CONFIG_ARCH_ROCKCHIP
		if (now < tunables->touchboostpulse_endtime) {
			new_freq = choose_freq(pcpu, loadadjfreq);
			if (new_freq < tunables->touchboost_freq)
				new_freq = tunables->touchboost_freq;
		} else
#endif
		if (pcpu->target_freq < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
				pcpu->target_freq < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->target_freq >= tunables->hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq &&
			pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
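/*
 * Summary of the sampling path above: measure the frequency-weighted load,
 * pick a candidate (hispeed_freq on a load burst or boost, otherwise
 * choose_freq()), gate increases above hispeed_freq on above_hispeed_delay,
 * gate decreases on the floor_freq/min_sample_time hold, then hand the new
 * target to the speedchange task below.
 */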

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
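/*
 * The speedchange task applies the highest target_freq requested by any
 * CPU in a policy, so one busy sibling keeps a shared clock domain up;
 * CPUFREQ_RELATION_H picks the closest table frequency at or below that
 * maximum.
 */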

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = tunables->hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}
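/*
 * A boost raises every online CPU governed by these tunables to at least
 * hispeed_freq and sets floor_freq/floor_validate_time so the sampling
 * timer holds that floor for min_sample_time from the moment the boost
 * was issued.
 */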

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);

			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
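/*
 * E.g. "80 1000000:90" tokenizes to {80, 1000000, 90} with *num_tokens = 3;
 * an even token count (a malformed freq/value pairing) is rejected with
 * -EINVAL before any allocation.
 */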

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->timer_rate = val;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,	\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
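/*
 * E.g. show_store_gov_pol_sys(target_loads) defines
 * show_target_loads_gov_sys, show_target_loads_gov_pol,
 * store_target_loads_gov_sys and store_target_loads_gov_pol, which the
 * attribute macros below wire to the "target_loads" sysfs file.
 */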

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
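/*
 * These groups surface under an "interactive" directory: typically
 * /sys/devices/system/cpu/cpufreq/interactive for the system-wide instance,
 * or under each policy's own cpufreq directory when the platform declares a
 * governor per policy (the parent kobject comes from
 * get_governor_parent_kobj()).
 */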

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

#ifdef CONFIG_ARCH_ROCKCHIP
static void cpufreq_interactive_input_event(struct input_handle *handle,
		unsigned int type, unsigned int code, int value)
{
	u64 now, endtime;
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables;

	if (type != EV_ABS)
		return;

	trace_cpufreq_interactive_boost("touch");
	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	now = ktime_to_us(ktime_get());
	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		tunables = pcpu->policy->governor_data;

		endtime = now + tunables->touchboostpulse_duration_val;
		if (endtime < (tunables->touchboostpulse_endtime +
			       10 * USEC_PER_MSEC))
			break;
		tunables->touchboostpulse_endtime = endtime;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->touchboost_freq) {
			pcpu->target_freq = tunables->touchboost_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
					ktime_to_us(ktime_get());
			anyboost = 1;
		}

		pcpu->floor_freq = tunables->touchboost_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());

		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
	if (anyboost)
		wake_up_process(speedchange_task);
}
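/*
 * The 10 ms slack above keeps a stream of EV_ABS events from re-arming the
 * pulse on every sample: a new event only takes effect if it would extend
 * touchboostpulse_endtime by at least 10 ms beyond the pending pulse.
 */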

static int cpufreq_interactive_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;
err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id cpufreq_interactive_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	{ },
};

static struct input_handler cpufreq_interactive_input_handler = {
	.event		= cpufreq_interactive_input_event,
	.connect	= cpufreq_interactive_input_connect,
	.disconnect	= cpufreq_interactive_input_disconnect,
	.name		= "cpufreq_interactive",
	.id_table	= cpufreq_interactive_ids,
};
#endif

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

#ifdef CONFIG_ARCH_ROCKCHIP
		{
			unsigned int index;

			freq_table = cpufreq_frequency_get_table(policy->cpu);
			tunables->hispeed_freq = policy->max;
			if (policy->min < 600000)
				tunables->hispeed_freq = 600000;
			else if (cpufreq_frequency_table_target(policy,
					freq_table, policy->min + 1,
					CPUFREQ_RELATION_L, &index) == 0)
				tunables->hispeed_freq =
					freq_table[index].frequency;
			tunables->timer_slack_val = 20 * USEC_PER_MSEC;
			tunables->min_sample_time = 40 * USEC_PER_MSEC;
			store_above_hispeed_delay(tunables,
				"20000 1000000:80000 1200000:100000 1700000:20000",
				0);
			store_target_loads(tunables,
				"70 600000:70 800000:75 1500000:80 1700000:90",
				0);
			tunables->boostpulse_duration_val = 40 * USEC_PER_MSEC;
			tunables->touchboostpulse_duration_val =
				500 * USEC_PER_MSEC;
			tunables->touchboost_freq = 1200000;
		}
#endif

		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
#ifdef CONFIG_ARCH_ROCKCHIP
			rc = input_register_handler(
					&cpufreq_interactive_input_handler);
#endif
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
#ifdef CONFIG_ARCH_ROCKCHIP
				input_unregister_handler(
					&cpufreq_interactive_input_handler);
#endif
				cpufreq_unregister_notifier(
					&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(
					&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());
			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			pcpu->max_freq = policy->max;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);

			/*
			 * Reschedule the timer only if policy->max is raised.
			 * Delete the timers first: otherwise the timer
			 * callback may return without re-arming the timer
			 * when it fails to acquire the semaphore, and that
			 * race can leave the timer stopped unexpectedly.
			 */
			if (policy->max > pcpu->max_freq) {
				down_write(&pcpu->enable_sem);
				del_timer_sync(&pcpu->cpu_timer);
				del_timer_sync(&pcpu->cpu_slack_timer);
				cpufreq_interactive_timer_start(tunables, j);
				up_write(&pcpu->enable_sem);
			}

			pcpu->max_freq = policy->max;
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");