cpufreq: interactive: don't handle transition notification if not enabled
drivers/cpufreq/cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed on a load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

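/*
 * Note: target_loads is kept as a flat array of alternating values: even
 * indices hold target loads, odd indices hold the frequency at or above
 * which the next load takes effect (see freq_to_targetload() and
 * store_target_loads() below).  For example, "85 1500000:90" means a
 * target load of 85% below 1.5 GHz and 90% at or above it.
 */
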
/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

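/*
 * Reschedule the sampling timer.  cpu_timer is deferrable, so it will not
 * by itself wake an idle CPU; when running above policy->min, the
 * non-deferrable cpu_slack_timer is armed timer_slack_val usecs after it
 * to bound how long an idle CPU can sit at an elevated speed before being
 * re-evaluated.
 */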
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);

        mod_timer_pinned(&pcpu->cpu_timer, expires);
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_lock(&pcpu->load_lock);
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock(&pcpu->load_lock);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
        int i;
        unsigned int ret;

        spin_lock(&target_loads_lock);

        for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
                ;

        ret = target_loads[i];
        spin_unlock(&target_loads_lock);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
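/*
 * Sketch of the search: loadadjfreq is roughly (percent load) * (current
 * frequency), so loadadjfreq / tl is the lowest frequency that would keep
 * the observed load at or below the target load tl.  Because tl itself
 * may change with frequency, the loop re-evaluates until it converges,
 * with freqmin/freqmax bracketing the candidates so the iteration
 * terminates even if the configured target loads are not monotonic.
 */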

static unsigned int choose_freq(
        struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                cpufreq_frequency_table_target(
                        pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                        CPUFREQ_RELATION_L, &index);
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmax - 1, CPUFREQ_RELATION_H,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmin + 1, CPUFREQ_RELATION_L,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

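/*
 * update_load() accumulates busy (non-idle) time scaled by the current
 * frequency into cputime_speedadj.  Dividing by the wall time elapsed
 * (done by the sampling timer) gives an average of load * frequency,
 * which choose_freq() divides by a target load to pick a speed.
 */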
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time_us(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
        active_time = delta_time - delta_idle;
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock(&pcpu->load_lock);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock(&pcpu->load_lock);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = boost_val || now < boostpulse_endtime;

        if (cpu_load >= go_hispeed_load || boosted) {
                if (pcpu->target_freq < hispeed_freq) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already at max speed with no need to change it; wait until the
         * next idle to re-evaluate, no timer needed.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

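/*
 * Frequency changes are applied from this dedicated SCHED_FIFO kthread:
 * the sampling timer only records a new target_freq and sets the CPU's
 * bit in speedchange_cpumask, and this task then drives each policy to
 * the highest target_freq among that policy's CPUs.
 */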
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

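/*
 * Boost all online CPUs: raise any CPU below hispeed_freq to that speed,
 * make hispeed_freq the new floor for every CPU, and wake the speedchange
 * task if any target was actually raised.
 */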
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

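/*
 * Frequency-transition notifier: refresh each policy CPU's load statistics
 * after the driver completes a speed change.  The enable_sem trylock and
 * governor_enabled check make this a no-op when the governor is not
 * enabled on the CPU (the change named in the commit subject above), since
 * the per-CPU state is stale in that case.
 */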
static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        spin_lock(&pjcpu->load_lock);
                        update_load(cpu);
                        spin_unlock(&pjcpu->load_lock);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        int i;
        ssize_t ret = 0;

        spin_lock(&target_loads_lock);

        for (i = 0; i < ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");

        ret += sprintf(buf + ret, "\n");
        spin_unlock(&target_loads_lock);
        return ret;
}

static ssize_t store_target_loads(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        const char *cp;
        unsigned int *new_target_loads = NULL;
        int ntokens = 1;
        int i;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err_inval;

        new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!new_target_loads) {
                ret = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
                        goto err_inval;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_inval;

        spin_lock(&target_loads_lock);
        if (target_loads != default_target_loads)
                kfree(target_loads);
        target_loads = new_target_loads;
        ntarget_loads = ntokens;
        spin_unlock(&target_loads_lock);
        return count;

err_inval:
        ret = -EINVAL;
err:
        kfree(new_target_loads);
        return ret;
}

static struct global_attr target_loads_attr =
        __ATTR(target_loads, S_IRUGO | S_IWUSR,
                show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        timer_slack_val = val;
        return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);
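/*
 * Example (assuming the usual location of the global cpufreq sysfs tree):
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 * boosts all CPUs to at least hispeed_freq for boostpulse_duration usecs.
 */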

static ssize_t show_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_duration_val = val;
        return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
        &target_loads_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &timer_slack.attr,
        &boost.attr,
        &boostpulse.attr,
        &boostpulse_duration.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        unsigned long expires;

                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        expires = jiffies + usecs_to_jiffies(timer_rate);
                        pcpu->cpu_timer.expires = expires;
                        add_timer_on(&pcpu->cpu_timer, j);
                        if (timer_slack_val >= 0) {
                                expires += usecs_to_jiffies(timer_slack_val);
                                pcpu->cpu_slack_timer.expires = expires;
                                add_timer_on(&pcpu->cpu_slack_timer, j);
                        }
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                cpufreq_register_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                cpufreq_unregister_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-CPU timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");