/*
 * Copyright (c) 2012 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kernel.h>
#include <linux/cpuquiet.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <asm/cputime.h>
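/*
 * "Balanced" cpuquiet governor: a three-state machine (IDLE, DOWN, UP)
 * driven by cpufreq transitions. Crossing idle_top_freq arms the UP path
 * (consider bringing another core on-line); dropping below idle_bottom_freq
 * arms the DOWN path (consider quiescing the slowest core). Decisions are
 * made from per-CPU load estimates sampled by a periodic timer.
 */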
typedef enum {
	CPU_SPEED_BALANCED,
	CPU_SPEED_BIASED,
	CPU_SPEED_SKEWED,
} CPU_SPEED_BALANCE;

typedef enum {
	IDLE,
	DOWN,
	UP,
} BALANCED_STATE;

struct idle_info {
	u64 idle_current;
	u64 idle_last;
	u64 last_timestamp;
	u64 timestamp;
};

static DEFINE_PER_CPU(struct idle_info, idleinfo);
static DEFINE_PER_CPU(unsigned int, cpu_load);

static struct timer_list load_timer;
static bool load_timer_active;
/* configurable parameters */
static unsigned int balance_level = 60;
static unsigned int idle_bottom_freq;
static unsigned int idle_top_freq;
static unsigned long up_delay;
static unsigned long down_delay;
static unsigned long last_change_time;
static unsigned int load_sample_rate = 20; /* msec */
static struct workqueue_struct *balanced_wq;
static struct delayed_work balanced_work;
static BALANCED_STATE balanced_state;
static struct kobject *balanced_kobject;
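/*
 * Periodic load estimation: every load_sample_rate msec, compare the
 * growth of each on-line CPU's idle time against elapsed wall-clock time
 * and derive a 0-100 load percentage into the per-CPU cpu_load variable.
 */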
static void calculate_load_timer(unsigned long data)
{
	int i;
	u64 idle_time, elapsed_time;

	if (!load_timer_active)
		return;

	for_each_online_cpu(i) {
		struct idle_info *iinfo = &per_cpu(idleinfo, i);
		unsigned int *load = &per_cpu(cpu_load, i);

		iinfo->idle_last = iinfo->idle_current;
		iinfo->last_timestamp = iinfo->timestamp;
		iinfo->idle_current =
			get_cpu_idle_time_us(i, &iinfo->timestamp);
		elapsed_time = iinfo->timestamp - iinfo->last_timestamp;

		idle_time = iinfo->idle_current - iinfo->idle_last;
		idle_time *= 100;
		do_div(idle_time, elapsed_time);
		*load = 100 - idle_time;
	}
	mod_timer(&load_timer, jiffies + msecs_to_jiffies(load_sample_rate));
}
static void start_load_timer(void)
{
	int i;

	if (load_timer_active)
		return;

	load_timer_active = true;

	for_each_online_cpu(i) {
		struct idle_info *iinfo = &per_cpu(idleinfo, i);

		iinfo->idle_current =
			get_cpu_idle_time_us(i, &iinfo->timestamp);
	}
	mod_timer(&load_timer, jiffies + msecs_to_jiffies(100));
}
static void stop_load_timer(void)
{
	if (!load_timer_active)
		return;

	load_timer_active = false;
	del_timer(&load_timer);
}
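/*
 * Helpers over the sampled per-CPU loads: find the least-loaded secondary
 * CPU (a candidate for off-lining; CPU 0 is never offered), the highest
 * load in the system, and how many CPUs sit at or below a given load.
 */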
static unsigned int get_slowest_cpu_n(void)
{
	unsigned int cpu = nr_cpu_ids;
	unsigned long minload = ULONG_MAX;
	int i;

	for_each_online_cpu(i) {
		unsigned int *load = &per_cpu(cpu_load, i);

		if ((i > 0) && (minload > *load)) {
			cpu = i;
			minload = *load;
		}
	}

	return cpu;
}
static unsigned int cpu_highest_speed(void)
{
	unsigned int maxload = 0;
	int i;

	for_each_online_cpu(i) {
		unsigned int *load = &per_cpu(cpu_load, i);

		maxload = max(maxload, *load);
	}

	return maxload;
}
static unsigned int count_slow_cpus(unsigned int limit)
{
	unsigned int cnt = 0;
	int i;

	for_each_online_cpu(i) {
		unsigned int *load = &per_cpu(cpu_load, i);

		if (*load <= limit)
			cnt++;
	}

	return cnt;
}
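/*
 * Runnable-thread thresholds in fixed point with two fractional bits
 * (NR_FSHIFT = 2), so 9 encodes 2.25 average runnable threads. The first
 * index whose threshold covers avg_nr_running() becomes the target number
 * of on-line CPUs; nr_run_hysteresis (2 = 0.5 thread) widens a threshold
 * when the previous target was not above it, damping target increases.
 */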
#define NR_FSHIFT	2

static unsigned int nr_run_thresholds[] = {
/*	 1,  2,  3,  4 - on-line cpus target */
	 5,  9, 10, UINT_MAX /* avg run threads * 4 (e.g., 9 = 2.25 threads) */
};
static unsigned int nr_run_hysteresis = 2;	/* 0.5 thread */
static unsigned int nr_run_last;
static CPU_SPEED_BALANCE balanced_speed_balance(void)
{
	unsigned long highest_speed = cpu_highest_speed();
	unsigned long balanced_speed = highest_speed * balance_level / 100;
	unsigned long skewed_speed = balanced_speed / 2;
	unsigned int nr_cpus = num_online_cpus();
	unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	unsigned int avg_nr_run = avg_nr_running();
	unsigned int nr_run;

	/* balanced: freq targets for all CPUs are above balance_level% of
	   highest speed
	   biased: freq target for at least one CPU is below that threshold
	   skewed: freq targets for at least 2 CPUs are below half of it */
	for (nr_run = 1; nr_run < ARRAY_SIZE(nr_run_thresholds); nr_run++) {
		unsigned int nr_threshold = nr_run_thresholds[nr_run - 1];
		if (nr_run_last <= nr_run)
			nr_threshold += nr_run_hysteresis;
		if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
			break;
	}
	nr_run_last = nr_run;

	if (count_slow_cpus(skewed_speed) >= 2 || nr_cpus > max_cpus ||
	    nr_run < nr_cpus)
		return CPU_SPEED_SKEWED;

	if (count_slow_cpus(balanced_speed) >= 1 || nr_cpus == max_cpus ||
	    nr_run <= nr_cpus)
		return CPU_SPEED_BIASED;

	return CPU_SPEED_BALANCED;
}
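/*
 * Core on/off-lining decisions. In the DOWN state the slowest secondary
 * core is quiesced; in the UP state the speed balance above decides
 * whether to bring one more core on-line (balanced), take one off
 * (skewed), or leave things alone (biased). Down-transitions are rate
 * limited by down_delay since the last change.
 */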
static void balanced_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;
	unsigned long now = jiffies;
	CPU_SPEED_BALANCE balance;

	switch (balanced_state) {
	case IDLE:
		break;
	case DOWN:
		cpu = get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
			queue_delayed_work(balanced_wq,
						&balanced_work, up_delay);
		} else
			stop_load_timer();
		break;
	case UP:
		balance = balanced_speed_balance();
		switch (balance) {
		/* cpu speed is up and balanced - one more on-line */
		case CPU_SPEED_BALANCED:
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				up = true;
			break;
		/* cpu speed is up, but skewed - remove one core */
		case CPU_SPEED_SKEWED:
			cpu = get_slowest_cpu_n();
			if (cpu < nr_cpu_ids)
				up = false;
			break;
		/* cpu speed is up, but under-utilized - do nothing */
		case CPU_SPEED_BIASED:
		default:
			break;
		}
		queue_delayed_work(balanced_wq, &balanced_work, up_delay);
		break;
	default:
		pr_err("%s: invalid cpuquiet balanced governor state %d\n",
		       __func__, balanced_state);
	}

	if (!up && ((now - last_change_time) < down_delay))
		cpu = nr_cpu_ids;

	if (cpu < nr_cpu_ids) {
		last_change_time = now;
		if (up)
			cpuquiet_wake_cpu(cpu);
		else
			cpuquiet_quiesence_cpu(cpu);
	}
}
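/*
 * cpufreq transition hook: a frequency crossing idle_top_freq or
 * idle_bottom_freq moves the state machine between IDLE, DOWN and UP
 * and (re)schedules the work item and load-sampling timer accordingly.
 */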
static int balanced_cpufreq_transition(struct notifier_block *nb,
	unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;
	unsigned long cpu_freq;

	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) {
		cpu_freq = freqs->new;

		switch (balanced_state) {
		case IDLE:
			if (cpu_freq >= idle_top_freq) {
				balanced_state = UP;
				queue_delayed_work(
					balanced_wq, &balanced_work, up_delay);
				start_load_timer();
			} else if (cpu_freq <= idle_bottom_freq) {
				balanced_state = DOWN;
				queue_delayed_work(
					balanced_wq, &balanced_work,
					down_delay);
				start_load_timer();
			}
			break;
		case DOWN:
			if (cpu_freq >= idle_top_freq) {
				balanced_state = UP;
				queue_delayed_work(
					balanced_wq, &balanced_work, up_delay);
			}
			break;
		case UP:
			if (cpu_freq <= idle_bottom_freq) {
				balanced_state = DOWN;
				queue_delayed_work(balanced_wq,
					&balanced_work, up_delay);
			}
			break;
		default:
			pr_err("%s: invalid cpuquiet balanced governor state %d\n",
			       __func__, balanced_state);
			break;
		}
	}

	return NOTIFY_OK;
}
static struct notifier_block balanced_cpufreq_nb = {
	.notifier_call = balanced_cpufreq_transition,
};
static void delay_callback(struct cpuquiet_attribute *attr)
{
	unsigned long val;

	if (attr) {
		val = (*((unsigned long *)(attr->param)));
		(*((unsigned long *)(attr->param))) = msecs_to_jiffies(val);
	}
}
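/*
 * Tunables exposed through sysfs. up_delay and down_delay are written in
 * milliseconds; delay_callback converts the stored value to jiffies.
 */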
CPQ_BASIC_ATTRIBUTE(balance_level, 0644, uint);
CPQ_BASIC_ATTRIBUTE(idle_bottom_freq, 0644, uint);
CPQ_BASIC_ATTRIBUTE(idle_top_freq, 0644, uint);
CPQ_BASIC_ATTRIBUTE(load_sample_rate, 0644, uint);
CPQ_ATTRIBUTE(up_delay, 0644, ulong, delay_callback);
CPQ_ATTRIBUTE(down_delay, 0644, ulong, delay_callback);
static struct attribute *balanced_attributes[] = {
	&balance_level_attr.attr,
	&idle_bottom_freq_attr.attr,
	&idle_top_freq_attr.attr,
	&up_delay_attr.attr,
	&down_delay_attr.attr,
	&load_sample_rate_attr.attr,
	NULL,
};
static const struct sysfs_ops balanced_sysfs_ops = {
	.show = cpuquiet_auto_sysfs_show,
	.store = cpuquiet_auto_sysfs_store,
};
static struct kobj_type ktype_balanced = {
	.sysfs_ops = &balanced_sysfs_ops,
	.default_attrs = balanced_attributes,
};
static int balanced_sysfs(void)
{
	int err;

	balanced_kobject = kzalloc(sizeof(*balanced_kobject), GFP_KERNEL);
	if (!balanced_kobject)
		return -ENOMEM;

	err = cpuquiet_kobject_init(balanced_kobject, &ktype_balanced,
				"balanced");
	if (err)
		kfree(balanced_kobject);

	return err;
}
static void balanced_stop(void)
{
	/*
	 * First unregister the notifier. This ensures the governor state
	 * can't be modified by a cpufreq transition.
	 */
	cpufreq_unregister_notifier(&balanced_cpufreq_nb,
		CPUFREQ_TRANSITION_NOTIFIER);

	/* now we can force the governor to be idle */
	balanced_state = IDLE;
	cancel_delayed_work_sync(&balanced_work);
	destroy_workqueue(balanced_wq);
	del_timer(&load_timer);

	kobject_put(balanced_kobject);
}
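/*
 * Start-up: create the sysfs node and work queue, derive idle_top_freq
 * and idle_bottom_freq from the middle of CPU 0's cpufreq table, register
 * the transition notifier, and kick the state machine with the current
 * frequency.
 */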
static int balanced_start(void)
{
	int err, count;
	struct cpufreq_frequency_table *table;
	struct cpufreq_freqs initial_freq;

	err = balanced_sysfs();
	if (err)
		return err;

	balanced_wq = alloc_workqueue("cpuquiet-balanced",
			WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
	if (!balanced_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&balanced_work, balanced_work_func);

	up_delay = msecs_to_jiffies(100);
	down_delay = msecs_to_jiffies(2000);

	table = cpufreq_frequency_get_table(0);
	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	idle_top_freq = table[(count / 2) - 1].frequency;
	idle_bottom_freq = table[(count / 2) - 2].frequency;

	cpufreq_register_notifier(&balanced_cpufreq_nb,
		CPUFREQ_TRANSITION_NOTIFIER);

	init_timer(&load_timer);
	load_timer.function = calculate_load_timer;

	/* FIXME: kick start the state machine by faking a freq notification */
	initial_freq.new = cpufreq_get(0);
	if (initial_freq.new != 0)
		balanced_cpufreq_transition(NULL, CPUFREQ_RESUMECHANGE,
						&initial_freq);

	return 0;
}
struct cpuquiet_governor balanced_governor = {
	.name		= "balanced",
	.start		= balanced_start,
	.stop		= balanced_stop,
	.owner		= THIS_MODULE,
};
static int __init init_balanced(void)
{
	return cpuquiet_register_governor(&balanced_governor);
}
static void __exit exit_balanced(void)
{
	cpuquiet_unregister_governor(&balanced_governor);
}
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPUQUIET_DEFAULT_GOV_BALANCED
fs_initcall(init_balanced);
#else
module_init(init_balanced);
#endif
module_exit(exit_balanced);