cpufreq: interactive: specify duration of CPU speed boost pulse
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);
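
/*
 * target_loads is a flattened array of alternating load and frequency
 * values: even indices hold target loads, odd indices the frequency
 * boundaries at which the next load takes effect.  For example (values
 * illustrative), writing "85 1000000:90" via sysfs targets 85% load
 * below 1,000,000 kHz and 90% load at or above it.
 */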

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

static bool governidle;
module_param(governidle, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(governidle,
        "Set to 1 to wake up CPUs from idle to reduce speed (default 0)");

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
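/*
 * Note: the 10 ms max_transition_latency below asks the cpufreq core to
 * decline this governor for drivers whose frequency-switch latency
 * exceeds that bound.
 */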
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

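/*
 * Reschedule the per-CPU sampling timer and reset the idle/load
 * accounting baselines.  mod_timer_pinned() keeps the timer on the
 * current CPU, so this is intended to run on the CPU being sampled.
 */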
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        mod_timer_pinned(&pcpu->cpu_timer,
                         jiffies + usecs_to_jiffies(timer_rate));
        spin_lock(&pcpu->load_lock);
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock(&pcpu->load_lock);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
        int i;
        unsigned int ret;

        spin_lock(&target_loads_lock);

        for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
                ;

        ret = target_loads[i];
        spin_unlock(&target_loads_lock);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
        struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                cpufreq_frequency_table_target(
                        pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                        CPUFREQ_RELATION_L, &index);
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmax - 1, CPUFREQ_RELATION_H,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmin + 1, CPUFREQ_RELATION_L,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}
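
/*
 * Worked example (illustrative numbers): with loadadjfreq == 45,000,000
 * (75% load at 600,000 kHz) and a flat target load of 90, the loop asks
 * for the lowest table frequency >= 45,000,000 / 90 = 500,000 kHz.  When
 * per-frequency target loads differ, the freqmin/freqmax bounds narrow
 * on each iteration until the chosen frequency repeats.
 */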

static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time_us(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
        active_time = delta_time - delta_idle;
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}
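
/*
 * cpufreq_interactive_timer() later divides the accumulated
 * cputime_speedadj by wall time and multiplies by 100, yielding a
 * frequency-weighted load ("loadadjfreq" == percent load * frequency):
 * e.g. 50% busy at 1,000,000 kHz and 100% busy at 500,000 kHz both
 * give 50,000,000.
 */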

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock(&pcpu->load_lock);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock(&pcpu->load_lock);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = boost_val || now < boostpulse_endtime;

        if ((cpu_load >= go_hispeed_load || boosted) &&
            pcpu->target_freq < hispeed_freq)
                new_freq = hispeed_freq;
        else
                new_freq = choose_freq(pcpu, loadadjfreq);

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If governing speed in idle and already at min, cancel the
                 * timer if that CPU goes idle.  We don't need to re-evaluate
                 * speed until the next idle exit.
                 */
                if (governidle && pcpu->target_freq == pcpu->policy->min)
                        pcpu->timer_idlecancel = 1;

                cpufreq_interactive_timer_resched(pcpu);
        }

exit:
        return;
}

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->timer_idlecancel = 0;
                        cpufreq_interactive_timer_resched(pcpu);
                }
        } else if (governidle) {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        pcpu->timer_idlecancel = 0;
                }
        }

}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!pcpu->governor_enabled)
                return;

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                pcpu->timer_idlecancel = 0;
                cpufreq_interactive_timer_resched(pcpu);
        } else if (!governidle &&
                   time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

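                        /*
                         * All CPUs in a policy share one clock; run it at
                         * the max target_freq requested by any CPU in the
                         * policy so no sibling is slowed below its request.
                         */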
                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        spin_lock(&pjcpu->load_lock);
                        update_load(cpu);
                        spin_unlock(&pjcpu->load_lock);
                }
        }

        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        int i;
        ssize_t ret = 0;

        spin_lock(&target_loads_lock);

        for (i = 0; i < ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");

        ret += sprintf(buf + ret, "\n");
        spin_unlock(&target_loads_lock);
        return ret;
}

static ssize_t store_target_loads(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        const char *cp;
        unsigned int *new_target_loads = NULL;
        int ntokens = 1;
        int i;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err_inval;

        new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!new_target_loads) {
                ret = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
                        goto err_inval;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_inval;

        spin_lock(&target_loads_lock);
        if (target_loads != default_target_loads)
                kfree(target_loads);
        target_loads = new_target_loads;
        ntarget_loads = ntokens;
        spin_unlock(&target_loads_lock);
        return count;

err_inval:
        ret = -EINVAL;
err:
        kfree(new_target_loads);
        return ret;
}

static struct global_attr target_loads_attr =
        __ATTR(target_loads, S_IRUGO | S_IWUSR,
                show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);
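
/*
 * Usage sketch (path assumes the default cpufreq sysfs layout): writing
 * any value to the boostpulse file starts a pulse that holds speed at or
 * above hispeed_freq for boostpulse_duration usecs, e.g.:
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */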

static ssize_t show_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_duration_val = val;
        return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
        &target_loads_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &boost.attr,
        &boostpulse.attr,
        &boostpulse_duration.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                        pcpu->cpu_timer.expires =
                                jiffies + usecs_to_jiffies(timer_rate);
                        add_timer_on(&pcpu->cpu_timer, j);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                cpufreq_register_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);
                }

                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                cpufreq_unregister_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                if (governidle)
                        init_timer(&pcpu->cpu_timer);
                else
                        init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                spin_lock_init(&pcpu->load_lock);
        }

        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");