cpufreq: interactive: fix race on governor start/stop
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static int active_count;

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};
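
/*
 * Editor's note (commentary added in this edit, not in the original source):
 * enable_sem is what the start/stop race fix hinges on.  Every asynchronous
 * entry point (the per-CPU timers, the idle notifier, the speedchange task
 * and the transition notifier) must take a read lock and re-check
 * governor_enabled before touching this struct:
 *
 *         if (!down_read_trylock(&pcpu->enable_sem))
 *                 return;
 *         if (!pcpu->governor_enabled) {
 *                 up_read(&pcpu->enable_sem);
 *                 return;
 *         }
 *         ... use pcpu ...
 *         up_read(&pcpu->enable_sem);
 *
 * CPUFREQ_GOV_START/STOP take the semaphore for writing while flipping
 * governor_enabled, so no callback can run against a half-initialized or
 * half-torn-down policy.
 */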

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;
/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
        unsigned long flags;

        mod_timer_pinned(&pcpu->cpu_timer, expires);
        if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time_us(smp_processor_id(),
                                     &pcpu->time_in_idle_timestamp);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_targetload(unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&target_loads_lock, flags);

        for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
                ;

        ret = target_loads[i];
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
}
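
/*
 * Editor's note (commentary, not in the original source): target_loads is a
 * flat array of the form {load, freq, load, freq, ...}.  A hypothetical
 * setting of "85 1000000:90" is stored as {85, 1000000, 90}; the loop above
 * steps i by 2 across the load entries while freq is at or above the next
 * boundary frequency.  So freq_to_targetload(800000) returns 85 and
 * freq_to_targetload(1200000) returns 90.
 */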

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */

static unsigned int choose_freq(
        struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                cpufreq_frequency_table_target(
                        pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                        CPUFREQ_RELATION_L, &index);
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmax - 1, CPUFREQ_RELATION_H,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                cpufreq_frequency_table_target(
                                        pcpu->policy, pcpu->freq_table,
                                        freqmin + 1, CPUFREQ_RELATION_L,
                                        &index);
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}
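
/*
 * Editor's note (commentary, not in the original source): choose_freq()
 * iterates rather than computing loadadjfreq / tl once because the target
 * load itself depends on the candidate frequency.  Each pass narrows the
 * [freqmin, freqmax] window around the answer; since the frequency table is
 * finite and the bounds only ever tighten, the loop terminates either when
 * two successive passes pick the same frequency or when the bounds meet.
 */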

static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time_us(cpu, &now);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
        active_time = delta_time - delta_idle;
        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = boost_val || now < boostpulse_endtime;

        if (cpu_load >= go_hispeed_load || boosted) {
                if (pcpu->target_freq < hispeed_freq) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time < min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}
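
/*
 * Editor's note (commentary, not in the original source): the load figure
 * used above is frequency-weighted.  With cputime_speedadj accumulating
 * active_time * cur_freq (see update_load()), the timer computes
 *
 *         loadadjfreq = (cputime_speedadj / delta_time) * 100
 *         cpu_load    = loadadjfreq / target_freq
 *
 * so, for example, a CPU 50% busy at 1.6 GHz while targeting 800 MHz
 * reports cpu_load = 100: demand is measured relative to the target speed
 * rather than as raw utilization at the current speed.
 */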

static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}
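
/*
 * Editor's note (commentary, not in the original source): a boost has two
 * effects on every online CPU.  target_freq is raised to at least
 * hispeed_freq right away (via the speedchange task), and floor_freq plus
 * floor_validate_time are reset so the timer will not pick a lower speed
 * until min_sample_time passes -- or, for a pulse, roughly until
 * boostpulse_endtime is reached (see the floor update in
 * cpufreq_interactive_timer()).
 */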

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static ssize_t show_target_loads(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&target_loads_lock, flags);

        for (i = 0; i < ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", target_loads[i],
                               i & 0x1 ? ":" : " ");

        ret += sprintf(buf + ret, "\n");
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        const char *cp;
        unsigned int *new_target_loads = NULL;
        int ntokens = 1;
        int i;
        unsigned long flags;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err_inval;

        new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!new_target_loads) {
                ret = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
                        goto err_inval;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_inval;

        spin_lock_irqsave(&target_loads_lock, flags);
        if (target_loads != default_target_loads)
                kfree(target_loads);
        target_loads = new_target_loads;
        ntarget_loads = ntokens;
        spin_unlock_irqrestore(&target_loads_lock, flags);
        return count;

err_inval:
        ret = -EINVAL;
err:
        kfree(new_target_loads);
        return ret;
}
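
/*
 * Editor's note (usage sketch, not in the original source; the path assumes
 * the usual global cpufreq sysfs layout).  target_loads accepts
 * "load[:freq load]..." pairs, so with hypothetical values:
 *
 *         echo "85 1000000:90 1700000:99" > \
 *                 /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * asks for 85% load below 1.0 GHz, 90% from 1.0 GHz, and 99% from 1.7 GHz
 * upward.  An even token count is rejected with -EINVAL because every
 * frequency boundary must sit between two loads.
 */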

static struct global_attr target_loads_attr =
        __ATTR(target_loads, S_IRUGO | S_IWUSR,
                show_target_loads, store_target_loads);

static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        timer_slack_val = val;
        return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
                                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static struct global_attr boostpulse =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse);
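
/*
 * Editor's note (usage sketch, not in the original source; path assumes the
 * usual global cpufreq sysfs layout).  Writing any value triggers a single
 * pulse:
 *
 *         echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * which boosts all CPUs to hispeed_freq for boostpulse_duration (the default
 * equals DEFAULT_MIN_SAMPLE_TIME, i.e. 80 ms).  Android user space commonly
 * writes this on input events, though that is outside this driver.
 */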

static ssize_t show_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
        struct kobject *kobj, struct attribute *attr, const char *buf,
        size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boostpulse_duration_val = val;
        return count;
}

define_one_global_rw(boostpulse_duration);

static struct attribute *interactive_attributes[] = {
        &target_loads_attr.attr,
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &timer_slack.attr,
        &boost.attr,
        &boostpulse.attr,
        &boostpulse_duration.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                mutex_lock(&gov_lock);

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);
                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        unsigned long expires;

                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        expires = jiffies + usecs_to_jiffies(timer_rate);
                        pcpu->cpu_timer.expires = expires;
                        add_timer_on(&pcpu->cpu_timer, j);
                        if (timer_slack_val >= 0) {
                                expires += usecs_to_jiffies(timer_slack_val);
                                pcpu->cpu_slack_timer.expires = expires;
                                add_timer_on(&pcpu->cpu_slack_timer, j);
                        }
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (++active_count > 1) {
                        mutex_unlock(&gov_lock);
                        return 0;
                }

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc) {
                        mutex_unlock(&gov_lock);
                        return rc;
                }

                idle_notifier_register(&cpufreq_interactive_idle_nb);
                cpufreq_register_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                if (--active_count > 0) {
                        mutex_unlock(&gov_lock);
                        return 0;
                }

                cpufreq_unregister_notifier(
                        &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                mutex_unlock(&gov_lock);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* Initialize per-cpu timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer_deferrable(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
                init_timer(&pcpu->cpu_slack_timer);
                pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
                spin_lock_init(&pcpu->load_lock);
                init_rwsem(&pcpu->enable_sem);
        }

        spin_lock_init(&target_loads_lock);
        spin_lock_init(&speedchange_cpumask_lock);
        mutex_init(&gov_lock);
        speedchange_task =
                kthread_create(cpufreq_interactive_speedchange_task, NULL,
                               "cfinteractive");
        if (IS_ERR(speedchange_task))
                return PTR_ERR(speedchange_task);

        sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
        get_task_struct(speedchange_task);

        /* NB: wake up so the thread does not look hung to the freezer */
        wake_up_process(speedchange_task);

        return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(speedchange_task);
        put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency sensitive workloads");
MODULE_LICENSE("GPL");