813a32e671d714010a1a83fb4f75d2319d553d00
[firefly-linux-kernel-4.4.55.git] / drivers / cpuquiet / governors / balanced.c
1 /*
2  * Copyright (c) 2012 NVIDIA CORPORATION.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  *
8  * This program is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along
14  * with this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
16  *
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/cpuquiet.h>
21 #include <linux/cpumask.h>
22 #include <linux/module.h>
23 #include <linux/cpufreq.h>
24 #include <linux/pm_qos_params.h>
25 #include <linux/jiffies.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/sched.h>
29 #include <linux/tick.h>
30 #include <asm/cputime.h>
31
32 #define CPUNAMELEN 8
33
/* Verdict on how evenly load is spread across the online cpus */
typedef enum {
	CPU_SPEED_BALANCED,	/* all cpus loaded: may bring another core online */
	CPU_SPEED_BIASED,	/* one cpu under threshold: hold steady */
	CPU_SPEED_SKEWED,	/* load concentrated: take a core offline */
} CPU_SPEED_BALANCE;

/* Governor state machine states */
typedef enum {
	IDLE,	/* no hotplug activity pending */
	DOWN,	/* trending towards taking cores offline */
	UP,	/* trending towards bringing cores online */
} BALANCED_STATE;

/* Per-cpu idle-time bookkeeping used to derive a load percentage */
struct idle_info {
	u64 idle_last;		/* idle total at the previous sample */
	u64 last_timestamp;	/* timestamp of the previous sample */
	u64 idle_current;	/* idle total at the latest sample */
	u64 timestamp;		/* timestamp of the latest sample */
};
52
/* Per-cpu idle sample and derived load percentage (0-100) */
static DEFINE_PER_CPU(struct idle_info, idleinfo);
static DEFINE_PER_CPU(unsigned int, cpu_load);

/* 100 ms periodic timer driving the load sampling */
static struct timer_list load_timer;
static bool load_timer_active;

/* sysfs attribute wrapper binding a file to an unsigned long tunable */
struct balanced_attribute {
	struct attribute attr;
	ssize_t (*show)(struct balanced_attribute *attr, char *buf);
	ssize_t (*store)(struct balanced_attribute *attr, const char *buf,
				size_t count);
	unsigned long *param;
};

/* Declare a balanced_attribute tied to the like-named global tunable */
#define BALANCED_ATTRIBUTE(_name, _mode) \
	static struct balanced_attribute _name ## _attr = {		\
		.attr = {.name = __stringify(_name), .mode = _mode },	\
		.show	= show_attribute,				\
		.store	= store_attribute,				\
		.param	= &_name,					\
}

/* configurable parameters */
static unsigned long balance_level = 75; /* percent of the busiest cpu's load */
static unsigned long idle_bottom_freq;	/* at/below this freq trend DOWN */
static unsigned long idle_top_freq;	/* above this freq trend UP */
static unsigned long up_delay;		/* jiffies between UP evaluations */
static unsigned long down_delay;	/* jiffies between DOWN evaluations */

static struct workqueue_struct *balanced_wq;
static struct delayed_work balanced_work;
static BALANCED_STATE balanced_state;
static struct kobject *balanced_kobject;
85
86 static void calculate_load_timer(unsigned long data)
87 {
88         int i;
89         u64 idle_time, elapsed_time;
90
91         if (!load_timer_active)
92                 return;
93
94         for_each_online_cpu(i) {
95                 struct idle_info *iinfo = &per_cpu(idleinfo, i);
96                 unsigned int *load = &per_cpu(cpu_load, i);
97
98                 iinfo->idle_last = iinfo->idle_current;
99                 iinfo->last_timestamp = iinfo->timestamp;
100                 iinfo->idle_current =
101                         get_cpu_idle_time_us(i, &iinfo->timestamp);
102                 elapsed_time = iinfo->timestamp - iinfo->last_timestamp;
103
104                 idle_time = iinfo->idle_current - iinfo->idle_last;
105                 idle_time *= 100;
106                 do_div(idle_time, elapsed_time);
107                 *load = 100 - idle_time;
108         }
109         mod_timer(&load_timer, jiffies + msecs_to_jiffies(100));
110 }
111
112 static void start_load_timer(void)
113 {
114         int i;
115
116         if (load_timer_active)
117                 return;
118
119         load_timer_active = true;
120
121         for_each_online_cpu(i) {
122                 struct idle_info *iinfo = &per_cpu(idleinfo, i);
123
124                 iinfo->idle_current =
125                         get_cpu_idle_time_us(i, &iinfo->timestamp);
126         }
127         mod_timer(&load_timer, jiffies + msecs_to_jiffies(100));
128 }
129
130 static void stop_load_timer(void)
131 {
132         if (!load_timer_active)
133                 return;
134
135         load_timer_active = false;
136         del_timer(&load_timer);
137 }
138
139 static unsigned int get_slowest_cpu_n(void)
140 {
141         unsigned int cpu = nr_cpu_ids;
142         unsigned long minload = ULONG_MAX;
143         int i;
144
145         for_each_online_cpu(i) {
146                 unsigned int *load = &per_cpu(cpu_load, i);
147
148                 if ((i > 0) && (minload > *load)) {
149                         cpu = i;
150                         minload = *load;
151                 }
152         }
153
154         return cpu;
155 }
156
157 static unsigned int cpu_highest_speed(void)
158 {
159         unsigned int maxload = 0;
160         int i;
161
162         for_each_online_cpu(i) {
163                 unsigned int *load = &per_cpu(cpu_load, i);
164
165                 maxload = max(maxload, *load);
166         }
167
168         return maxload;
169 }
170
171 static unsigned int count_slow_cpus(unsigned int limit)
172 {
173         unsigned int cnt = 0;
174         int i;
175
176         for_each_online_cpu(i) {
177                 unsigned int *load = &per_cpu(cpu_load, i);
178
179                 if (*load <= limit)
180                         cnt++;
181         }
182
183         return cnt;
184 }
185
/*
 * Classify the current load distribution relative to the busiest cpu.
 * balanced_speed is balance_level percent (default 75) of the highest
 * load; skewed_speed is half of that.
 *
 * Returns:
 *   CPU_SPEED_SKEWED   - at least two cpus below the skewed threshold,
 *                        or more cpus online than the pm_qos limit
 *   CPU_SPEED_BIASED   - at least one cpu below the balanced threshold,
 *                        or already at the pm_qos limit
 *   CPU_SPEED_BALANCED - every cpu above the balanced threshold
 *                        (room to bring another core online)
 */
static CPU_SPEED_BALANCE balanced_speed_balance(void)
{
	unsigned long highest_speed = cpu_highest_speed();
	unsigned long balanced_speed = highest_speed * balance_level / 100;
	unsigned long skewed_speed = balanced_speed / 2;
	unsigned int nr_cpus = num_online_cpus();
	/* a pm_qos request of 0 means "no constraint"; fall back to 4 */
	unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;

	if (count_slow_cpus(skewed_speed) >= 2 || nr_cpus > max_cpus)
		return CPU_SPEED_SKEWED;

	if (count_slow_cpus(balanced_speed) >= 1 || nr_cpus == max_cpus)
		return CPU_SPEED_BIASED;

	return CPU_SPEED_BALANCED;
}
205
/*
 * Deferred-work state machine body.  Depending on balanced_state, decide
 * whether to bring a core online (UP with balanced load), take the
 * least-loaded core offline (DOWN, or UP with skewed load), or do
 * nothing, then re-arm itself while there is still work to do.
 */
static void balanced_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;
	CPU_SPEED_BALANCE balance;

	switch (balanced_state) {
	case IDLE:
		break;
	case DOWN:
		/* keep removing the slowest core until only cpu 0 remains */
		cpu = get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
			queue_delayed_work(balanced_wq,
						 &balanced_work, down_delay);
		} else
			stop_load_timer();
		break;
	case UP:
		balance = balanced_speed_balance();
		switch (balance) {

		/* cpu speed is up and balanced - one more on-line */
		case CPU_SPEED_BALANCED:
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				up = true;
			break;
		/* cpu speed is up, but skewed - remove one core */
		case CPU_SPEED_SKEWED:
			cpu = get_slowest_cpu_n();
			if (cpu < nr_cpu_ids)
				up = false;
			break;
		/* cpu speed is up, but under-utilized - do nothing */
		case CPU_SPEED_BIASED:
		default:
			break;
		}
		/* re-evaluate after up_delay regardless of the verdict */
		queue_delayed_work(
			balanced_wq, &balanced_work, up_delay);
		break;
	default:
		pr_err("%s: invalid cpuquiet balanced governor state %d\n",
		       __func__, balanced_state);
	}

	/* apply the hotplug decision, if any */
	if (cpu < nr_cpu_ids) {
		if (up)
			cpuquiet_wake_cpu(cpu);
		else
			cpuquiet_quiesence_cpu(cpu);
	}
}
260
261 static int balanced_cpufreq_transition(struct notifier_block *nb,
262         unsigned long state, void *data)
263 {
264         struct cpufreq_freqs *freqs = data;
265         unsigned long cpu_freq;
266
267         if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) {
268                 cpu_freq = freqs->new;
269
270                 switch (balanced_state) {
271                 case IDLE:
272                         if (cpu_freq > idle_top_freq) {
273                                 balanced_state = UP;
274                                 queue_delayed_work(
275                                         balanced_wq, &balanced_work, up_delay);
276                                 start_load_timer();
277                         } else if (cpu_freq <= idle_bottom_freq) {
278                                 balanced_state = DOWN;
279                                 queue_delayed_work(
280                                         balanced_wq, &balanced_work,
281                                         down_delay);
282                                 start_load_timer();
283                         }
284                         break;
285                 case DOWN:
286                         if (cpu_freq > idle_top_freq) {
287                                 balanced_state = UP;
288                                 queue_delayed_work(
289                                         balanced_wq, &balanced_work, up_delay);
290                                 start_load_timer();
291                         }
292                         break;
293                 case UP:
294                         if (cpu_freq <= idle_bottom_freq) {
295                                 balanced_state = DOWN;
296                                 queue_delayed_work(balanced_wq,
297                                         &balanced_work, down_delay);
298                                 start_load_timer();
299                         }
300                         break;
301                 default:
302                         pr_err("%s: invalid tegra hotplug state %d\n",
303                                 __func__, balanced_state);
304                 }
305         }
306
307         return NOTIFY_OK;
308 }
309
/* Hooked into the cpufreq transition notifier chain in balanced_start() */
static struct notifier_block balanced_cpufreq_nb = {
	.notifier_call = balanced_cpufreq_transition,
};
313
314 static ssize_t show_attribute(struct balanced_attribute *battr, char *buf)
315 {
316         return sprintf(buf, "%lu\n", *(battr->param));
317 }
318
319 static ssize_t store_attribute(struct balanced_attribute *battr,
320                                         const char *buf, size_t count)
321 {
322         int err;
323         unsigned long val;
324
325         err = strict_strtoul(buf, 0, &val);
326         if (err < 0)
327                 return err;
328
329         *(battr->param) = val;
330
331         return count;
332 }
333
334 static ssize_t balanced_sysfs_store(struct kobject *kobj,
335                         struct attribute *attr, const char *buf, size_t count)
336 {
337         struct balanced_attribute *battr =
338                  container_of(attr, struct balanced_attribute, attr);
339
340         if (battr->store)
341                 return battr->store(battr, buf, count);
342
343         return -EINVAL;
344 }
345
346 static ssize_t balanced_sysfs_show(struct kobject *kobj,
347                         struct attribute *attr, char *buf)
348 {
349         struct balanced_attribute *battr =
350                  container_of(attr, struct balanced_attribute, attr);
351
352         return battr->show(battr, buf);
353 }
354
/* Tunables exposed as rw sysfs files under the governor's kobject */
BALANCED_ATTRIBUTE(balance_level, 0644);
BALANCED_ATTRIBUTE(idle_bottom_freq, 0644);
BALANCED_ATTRIBUTE(idle_top_freq, 0644);
BALANCED_ATTRIBUTE(up_delay, 0644);
BALANCED_ATTRIBUTE(down_delay, 0644);
360
/* NULL-terminated default attribute list for ktype_balanced */
static struct attribute *balanced_attributes[] = {
	&balance_level_attr.attr,
	&idle_bottom_freq_attr.attr,
	&idle_top_freq_attr.attr,
	&up_delay_attr.attr,
	&down_delay_attr.attr,
	NULL,
};
369
/* show/store dispatchers shared by all balanced attributes */
static const struct sysfs_ops balanced_sysfs_ops = {
	.show = balanced_sysfs_show,
	.store = balanced_sysfs_store,
};
374
375 static struct kobj_type ktype_balanced = {
376         .sysfs_ops = &balanced_sysfs_ops,
377         .default_attrs = balanced_attributes,
378 };
379
380 static int balanced_sysfs(void)
381 {
382         int err;
383
384         balanced_kobject = kzalloc(sizeof(*balanced_kobject),
385                                 GFP_KERNEL);
386
387         if (!balanced_kobject)
388                 return -ENOMEM;
389
390         err = cpuquiet_kobject_init(balanced_kobject, &ktype_balanced,
391                                 "balanced");
392
393         if (err)
394                 kfree(balanced_kobject);
395
396         return err;
397 }
398
399 static void balanced_stop(void)
400 {
401
402         /*
403            first unregister the notifiers. This ensures the governor state
404            can't be modified by a cpufreq transition
405         */
406         cpufreq_unregister_notifier(&balanced_cpufreq_nb,
407                 CPUFREQ_TRANSITION_NOTIFIER);
408
409         /* now we can force the governor to be idle */
410         balanced_state = IDLE;
411         cancel_delayed_work_sync(&balanced_work);
412         destroy_workqueue(balanced_wq);
413         del_timer(&load_timer);
414
415         kobject_put(balanced_kobject);
416 }
417
418 static int balanced_start(void)
419 {
420         int err, count;
421         struct cpufreq_frequency_table *table;
422
423         err = balanced_sysfs();
424         if (err)
425                 return err;
426
427         balanced_wq = alloc_workqueue("cpuquiet-balanced",
428                         WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
429         if (!balanced_wq)
430                 return -ENOMEM;
431
432         INIT_DELAYED_WORK(&balanced_work, balanced_work_func);
433
434         up_delay = msecs_to_jiffies(1000);
435         down_delay = msecs_to_jiffies(2000);
436
437         table = cpufreq_frequency_get_table(0);
438         for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
439                 ;
440
441         idle_top_freq = table[(count / 2) - 1].frequency;
442         idle_bottom_freq = table[(count / 2) - 2].frequency;
443
444         cpufreq_register_notifier(&balanced_cpufreq_nb,
445                 CPUFREQ_TRANSITION_NOTIFIER);
446
447         init_timer(&load_timer);
448         load_timer.function = calculate_load_timer;
449
450         return 0;
451 }
452
/* Governor descriptor registered with the cpuquiet core */
struct cpuquiet_governor balanced_governor = {
	.name		= "balanced",
	.start		= balanced_start,
	.stop		= balanced_stop,
	.owner		= THIS_MODULE,
};
459
/* Module entry point: register the governor with the cpuquiet core. */
static int __init init_balanced(void)
{
	return cpuquiet_register_governor(&balanced_governor);
}
464
/* Module exit: unregister the governor from the cpuquiet core. */
static void __exit exit_balanced(void)
{
	cpuquiet_unregister_governor(&balanced_governor);
}

MODULE_LICENSE("GPL");
module_init(init_balanced);
module_exit(exit_balanced);
473