/*
 * Cpuquiet driver for Rockchip SoCs
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kernel.h>
19 #include <linux/types.h>
20 #include <linux/sched.h>
21 #include <linux/module.h>
22 #include <linux/cpufreq.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
26 #include <linux/cpu.h>
27 #include <linux/clk.h>
28 #include <linux/debugfs.h>
29 #include <linux/seq_file.h>
30 #include <linux/cpuquiet.h>
31 #include <linux/pm_qos.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
35 #define INITIAL_STATE CPQ_DISABLED
36 #define HOTPLUG_DELAY_MS 100
38 static DEFINE_MUTEX(rockchip_cpuquiet_lock);
39 static DEFINE_MUTEX(rockchip_cpq_lock_stats);
41 static struct workqueue_struct *cpuquiet_wq;
42 static struct work_struct cpuquiet_work;
44 static wait_queue_head_t wait_enable;
45 static wait_queue_head_t wait_cpu;
48 static unsigned long hotplug_timeout_jiffies;
50 static struct cpumask cpumask_online_requests;
51 static struct cpumask cpumask_offline_requests;
59 static int cpq_target_state;
64 cputime64_t time_up_total;
66 unsigned int up_down_count;
67 } hp_stats[CONFIG_NR_CPUS];
69 static void hp_init_stats(void)
72 u64 cur_jiffies = get_jiffies_64();
74 mutex_lock(&rockchip_cpq_lock_stats);
76 for (i = 0; i < nr_cpu_ids; i++) {
77 hp_stats[i].time_up_total = 0;
78 hp_stats[i].last_update = cur_jiffies;
80 hp_stats[i].up_down_count = 0;
82 hp_stats[i].up_down_count = 1;
85 mutex_unlock(&rockchip_cpq_lock_stats);
88 /* must be called with rockchip_cpq_lock_stats held */
89 static void __hp_stats_update(unsigned int cpu, bool up)
91 u64 cur_jiffies = get_jiffies_64();
94 was_up = hp_stats[cpu].up_down_count & 0x1;
97 hp_stats[cpu].time_up_total =
98 hp_stats[cpu].time_up_total +
99 (cur_jiffies - hp_stats[cpu].last_update);
102 hp_stats[cpu].up_down_count++;
103 if ((hp_stats[cpu].up_down_count & 0x1) != up) {
104 /* FIXME: sysfs user space CPU control breaks stats */
105 pr_err("hotplug stats out of sync with CPU%d", cpu);
106 hp_stats[cpu].up_down_count ^= 0x1;
109 hp_stats[cpu].last_update = cur_jiffies;
112 static void hp_stats_update(unsigned int cpu, bool up)
114 mutex_lock(&rockchip_cpq_lock_stats);
116 __hp_stats_update(cpu, up);
118 mutex_unlock(&rockchip_cpq_lock_stats);
121 static int update_core_config(unsigned int cpunumber, bool up)
125 mutex_lock(&rockchip_cpuquiet_lock);
127 if (cpq_state == CPQ_DISABLED || cpunumber >= nr_cpu_ids) {
128 mutex_unlock(&rockchip_cpuquiet_lock);
133 cpumask_set_cpu(cpunumber, &cpumask_online_requests);
134 cpumask_clear_cpu(cpunumber, &cpumask_offline_requests);
135 queue_work(cpuquiet_wq, &cpuquiet_work);
137 cpumask_set_cpu(cpunumber, &cpumask_offline_requests);
138 cpumask_clear_cpu(cpunumber, &cpumask_online_requests);
139 queue_work(cpuquiet_wq, &cpuquiet_work);
142 mutex_unlock(&rockchip_cpuquiet_lock);
147 static int rockchip_quiesence_cpu(unsigned int cpunumber, bool sync)
151 err = update_core_config(cpunumber, false);
155 err = wait_event_interruptible_timeout(wait_cpu,
156 !cpu_online(cpunumber),
157 hotplug_timeout_jiffies);
168 static int rockchip_wake_cpu(unsigned int cpunumber, bool sync)
172 err = update_core_config(cpunumber, true);
176 err = wait_event_interruptible_timeout(wait_cpu, cpu_online(cpunumber),
177 hotplug_timeout_jiffies);
188 static struct cpuquiet_driver rockchip_cpuquiet_driver = {
190 .quiesence_cpu = rockchip_quiesence_cpu,
191 .wake_cpu = rockchip_wake_cpu,
194 /* must be called from worker function */
195 static void __cpuinit __apply_core_config(void)
200 struct cpumask online, offline, cpu_online;
201 int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
202 int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
204 if (min_cpus > num_possible_cpus())
207 max_cpus = num_present_cpus();
209 mutex_lock(&rockchip_cpuquiet_lock);
211 online = cpumask_online_requests;
212 offline = cpumask_offline_requests;
214 mutex_unlock(&rockchip_cpuquiet_lock);
216 /* always keep CPU0 online */
217 cpumask_set_cpu(0, &online);
218 cpu_online = *cpu_online_mask;
220 if (max_cpus < min_cpus)
223 nr_cpus = cpumask_weight(&online);
224 if (nr_cpus < min_cpus) {
226 count = min_cpus - nr_cpus;
227 for (; count > 0; count--) {
228 cpu = cpumask_next_zero(cpu, &online);
229 cpumask_set_cpu(cpu, &online);
230 cpumask_clear_cpu(cpu, &offline);
232 } else if (nr_cpus > max_cpus) {
233 count = nr_cpus - max_cpus;
235 for (; count > 0; count--) {
236 /* CPU0 should always be online */
237 cpu = cpumask_next(cpu, &online);
238 cpumask_set_cpu(cpu, &offline);
239 cpumask_clear_cpu(cpu, &online);
243 cpumask_andnot(&online, &online, &cpu_online);
244 for_each_cpu(cpu, &online) {
246 hp_stats_update(cpu, true);
249 cpumask_and(&offline, &offline, &cpu_online);
250 for_each_cpu(cpu, &offline) {
252 hp_stats_update(cpu, false);
254 wake_up_interruptible(&wait_cpu);
257 static void __cpuinit rockchip_cpuquiet_work_func(struct work_struct *work)
261 mutex_lock(&rockchip_cpuquiet_lock);
263 action = cpq_target_state;
265 if (action == CPQ_ENABLED) {
267 cpuquiet_device_free();
268 pr_info("cpuquiet enabled\n");
269 cpq_state = CPQ_ENABLED;
270 cpq_target_state = CPQ_IDLE;
271 wake_up_interruptible(&wait_enable);
274 if (cpq_state == CPQ_DISABLED) {
275 mutex_unlock(&rockchip_cpuquiet_lock);
279 if (action == CPQ_DISABLED) {
280 cpq_state = CPQ_DISABLED;
281 mutex_unlock(&rockchip_cpuquiet_lock);
282 cpuquiet_device_busy();
283 pr_info("cpuquiet disabled\n");
284 wake_up_interruptible(&wait_enable);
288 mutex_unlock(&rockchip_cpuquiet_lock);
289 __apply_core_config();
292 static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
294 mutex_lock(&rockchip_cpuquiet_lock);
296 if (cpq_state != CPQ_DISABLED)
297 queue_work(cpuquiet_wq, &cpuquiet_work);
299 mutex_unlock(&rockchip_cpuquiet_lock);
304 static int max_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
306 mutex_lock(&rockchip_cpuquiet_lock);
308 if (cpq_state != CPQ_DISABLED)
309 queue_work(cpuquiet_wq, &cpuquiet_work);
311 mutex_unlock(&rockchip_cpuquiet_lock);
316 /* Must be called with rockchip_cpuquiet_lock held */
317 static void __idle_stop_governor(void)
319 if (cpq_state == CPQ_DISABLED)
322 if (num_online_cpus() == 1)
323 cpuquiet_device_busy();
325 cpuquiet_device_free();
328 static int __cpuinit cpu_online_notify(struct notifier_block *nfb,
329 unsigned long action, void *hcpu)
333 if (num_online_cpus() == 1) {
334 mutex_lock(&rockchip_cpuquiet_lock);
335 __idle_stop_governor();
336 mutex_unlock(&rockchip_cpuquiet_lock);
340 case CPU_ONLINE_FROZEN:
341 mutex_lock(&rockchip_cpuquiet_lock);
342 __idle_stop_governor();
343 mutex_unlock(&rockchip_cpuquiet_lock);
350 static struct notifier_block cpu_online_notifier __cpuinitdata = {
351 .notifier_call = cpu_online_notify,
354 static struct notifier_block min_cpus_notifier = {
355 .notifier_call = min_cpus_notify,
358 static struct notifier_block max_cpus_notifier = {
359 .notifier_call = max_cpus_notify,
362 static void delay_callback(struct cpuquiet_attribute *attr)
367 val = (*((unsigned long *)(attr->param)));
368 (*((unsigned long *)(attr->param))) = msecs_to_jiffies(val);
372 static void enable_callback(struct cpuquiet_attribute *attr)
374 int target_state = enable ? CPQ_ENABLED : CPQ_DISABLED;
376 mutex_lock(&rockchip_cpuquiet_lock);
378 if (cpq_state != target_state) {
379 cpq_target_state = target_state;
380 queue_work(cpuquiet_wq, &cpuquiet_work);
383 mutex_unlock(&rockchip_cpuquiet_lock);
385 wait_event_interruptible(wait_enable, cpq_state == target_state);
388 CPQ_ATTRIBUTE(hotplug_timeout_jiffies, 0644, ulong, delay_callback);
389 CPQ_ATTRIBUTE(enable, 0644, bool, enable_callback);
391 static struct attribute *rockchip_cpuquiet_attributes[] = {
393 &hotplug_timeout_jiffies_attr.attr,
397 static const struct sysfs_ops rockchip_cpuquiet_sysfs_ops = {
398 .show = cpuquiet_auto_sysfs_show,
399 .store = cpuquiet_auto_sysfs_store,
402 static struct kobj_type ktype_sysfs = {
403 .sysfs_ops = &rockchip_cpuquiet_sysfs_ops,
404 .default_attrs = rockchip_cpuquiet_attributes,
407 static int rockchip_cpuquiet_sysfs_init(void)
411 struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
416 err = cpuquiet_kobject_init(kobj, &ktype_sysfs, "rockchip_cpuquiet");
424 #ifdef CONFIG_DEBUG_FS
425 static int hp_stats_show(struct seq_file *s, void *data)
428 u64 cur_jiffies = get_jiffies_64();
430 mutex_lock(&rockchip_cpuquiet_lock);
432 mutex_lock(&rockchip_cpq_lock_stats);
434 if (cpq_state != CPQ_DISABLED) {
435 for (i = 0; i < nr_cpu_ids; i++) {
438 was_up = (hp_stats[i].up_down_count & 0x1);
439 __hp_stats_update(i, was_up);
442 mutex_unlock(&rockchip_cpq_lock_stats);
444 mutex_unlock(&rockchip_cpuquiet_lock);
446 seq_printf(s, "%-15s ", "cpu:");
447 for (i = 0; i < nr_cpu_ids; i++)
448 seq_printf(s, "G%-9d ", i);
450 seq_printf(s, "%-15s ", "transitions:");
451 for (i = 0; i < nr_cpu_ids; i++)
452 seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
455 seq_printf(s, "%-15s ", "time plugged:");
456 for (i = 0; i < nr_cpu_ids; i++) {
457 seq_printf(s, "%-10llu ",
458 cputime64_to_clock_t(hp_stats[i].time_up_total));
462 seq_printf(s, "%-15s %llu\n", "time-stamp:",
463 cputime64_to_clock_t(cur_jiffies));
468 static int hp_stats_open(struct inode *inode, struct file *file)
470 return single_open(file, hp_stats_show, inode->i_private);
473 static const struct file_operations hp_stats_fops = {
474 .open = hp_stats_open,
477 .release = single_release,
480 static int __init rockchip_cpuquiet_debug_init(void)
484 dir = debugfs_create_dir("rockchip_cpuquiet", NULL);
488 if (!debugfs_create_file("stats", S_IRUGO, dir, NULL, &hp_stats_fops))
494 debugfs_remove_recursive(dir);
498 late_initcall(rockchip_cpuquiet_debug_init);
499 #endif /* CONFIG_DEBUG_FS */
501 static int __init rockchip_cpuquiet_init(void)
505 init_waitqueue_head(&wait_enable);
506 init_waitqueue_head(&wait_cpu);
509 * Not bound to the issuer CPU (=> high-priority), has rescue worker
510 * task, single-threaded, freezable.
512 cpuquiet_wq = alloc_workqueue(
513 "cpuquiet", WQ_NON_REENTRANT | WQ_FREEZABLE, 1);
518 INIT_WORK(&cpuquiet_work, rockchip_cpuquiet_work_func);
519 hotplug_timeout_jiffies = msecs_to_jiffies(HOTPLUG_DELAY_MS);
520 cpumask_clear(&cpumask_online_requests);
521 cpumask_clear(&cpumask_offline_requests);
523 cpq_state = INITIAL_STATE;
524 enable = cpq_state == CPQ_DISABLED ? false : true;
527 pr_info("cpuquiet initialized: %s\n",
528 (cpq_state == CPQ_DISABLED) ? "disabled" : "enabled");
530 if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
531 pr_err("Failed to register min cpus PM QoS notifier\n");
532 if (pm_qos_add_notifier(PM_QOS_MAX_ONLINE_CPUS, &max_cpus_notifier))
533 pr_err("Failed to register max cpus PM QoS notifier\n");
535 register_hotcpu_notifier(&cpu_online_notifier);
537 err = cpuquiet_register_driver(&rockchip_cpuquiet_driver);
539 destroy_workqueue(cpuquiet_wq);
543 err = rockchip_cpuquiet_sysfs_init();
545 cpuquiet_unregister_driver(&rockchip_cpuquiet_driver);
546 destroy_workqueue(cpuquiet_wq);
551 device_initcall(rockchip_cpuquiet_init);