/*
 * Copyright (C) 2013 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
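
/*
 * Default operating points, used when the platform provides no DVFS table:
 * .frequency is the CPU clock in kHz and .index carries the matching supply
 * voltage in uV (see the suspend_volt handling in cpufreq_init_cpu0()).
 */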
/* Frequency table index must be sequential starting at 0 */
static struct cpufreq_frequency_table default_freq_table[] = {
	{.frequency = 312 * 1000,  .index = 875 * 1000},
	{.frequency = 504 * 1000,  .index = 925 * 1000},
	{.frequency = 816 * 1000,  .index = 975 * 1000},
	{.frequency = 1008 * 1000, .index = 1075 * 1000},
	{.frequency = 1200 * 1000, .index = 1150 * 1000},
	{.frequency = 1416 * 1000, .index = 1250 * 1000},
	{.frequency = 1608 * 1000, .index = 1350 * 1000},
	{.frequency = CPUFREQ_TABLE_END},
};

static struct cpufreq_frequency_table *freq_table = default_freq_table;
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ		0x10
#define ENABLE_FURTHER_CPUFREQ		0x20
#define MASK_FURTHER_CPUFREQ		0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE			0x100
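/*
 * These bits are OR'ed into the "relation" argument of cpufreq_driver_target():
 * the PM and reboot notifiers use DISABLE/ENABLE_FURTHER_CPUFREQ to latch the
 * frequency across suspend and reboot, and CPUFREQ_PRIVATE marks requests
 * issued by this driver itself (e.g. the thermal-limit work) so that
 * cpufreq_scale_limit() can tell them apart from governor requests.
 */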
static int no_cpufreq_access;
static unsigned int suspend_freq = 816 * 1000;
static unsigned int suspend_volt = 1000000; // 1V
static unsigned int low_battery_freq = 600 * 1000;
static unsigned int low_battery_capacity = 5; // 5%
static bool is_booting = true;
static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
struct dvfs_node *clk_cpu_dvfs_node = NULL;
struct dvfs_node *clk_gpu_dvfs_node = NULL;

/*******************************************************/
static unsigned int cpufreq_get_rate(unsigned int cpu)
{
	if (clk_cpu_dvfs_node)
		return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
	return 0;
}
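
/*
 * Treat any governor whose name starts with 'o', 'i', 'c' or 'h'
 * (e.g. ondemand, interactive, conservative, hotplug) as a dynamic
 * governor that should have the thermal-limit work running.
 */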
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
{
	char c = 0;
	if (policy && policy->governor)
		c = policy->governor->name[0];
	return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
}
static unsigned int get_freq_from_table(unsigned int max_freq)
{
	unsigned int i, target_freq = 0;
	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = freq_table[i].frequency;
		if (freq <= max_freq && target_freq < freq)
			target_freq = freq;
	}
	if (!target_freq)
		target_freq = max_freq;
	return target_freq;
}
/**********************thermal limit**************************/
//#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static unsigned int temp_limit_freq = -1;
module_param(temp_limit_freq, uint, 0444);
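
/*
 * Per-load thermal caps: row N is used when roughly N+1 CPUs are busy.
 * In each entry .index is the temperature threshold in degrees Celsius
 * and .frequency is the corresponding CPU frequency cap in kHz
 * (-1 means no cap at that temperature).
 */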
static struct cpufreq_frequency_table temp_limits[4][4] = {
	{	/* 1 CPU busy */
		{.frequency =          -1, .index = 50},
		{.frequency =          -1, .index = 55},
		{.frequency =          -1, .index = 60},
		{.frequency = 1608 * 1000, .index = 75},
	}, {	/* 2 CPUs busy */
		{.frequency = 1800 * 1000, .index = 50},
		{.frequency = 1608 * 1000, .index = 55},
		{.frequency = 1416 * 1000, .index = 60},
		{.frequency = 1200 * 1000, .index = 75},
	}, {	/* 3 CPUs busy */
		{.frequency = 1608 * 1000, .index = 50},
		{.frequency = 1416 * 1000, .index = 55},
		{.frequency = 1200 * 1000, .index = 60},
		{.frequency = 1008 * 1000, .index = 75},
	}, {	/* 4 CPUs busy */
		{.frequency = 1416 * 1000, .index = 50},
		{.frequency = 1200 * 1000, .index = 55},
		{.frequency = 1008 * 1000, .index = 60},
		{.frequency =  816 * 1000, .index = 75},
	}
};
static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
	{.frequency = 1008 * 1000, .index = 100},
};

static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
	{.frequency = 1008 * 1000, .index = 0},
};
static int get_temp(void)
{
	return 60;	/* assumed constant; replace with the platform's temperature-sensor read (degrees C) */
}

static char sys_state;
static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	char state;

	if (copy_from_user(&state, buffer, 1)) {
		return -EFAULT;
	}
	sys_state = state;
	return count;
}
static const struct file_operations sys_state_fops = {
	.owner = THIS_MODULE,
	.write = sys_state_write,
};

static struct miscdevice sys_state_dev = {
	.fops = &sys_state_fops,
	.name = "sys_state",
	.minor = MISC_DYNAMIC_MINOR,
};
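
/*
 * Thermal limit worker: runs on CPU0 every 100ms while a dynamic governor is
 * active. Userspace writes '1' to the sys_state misc device to signal a
 * benchmark/performance scenario, which switches to the dedicated CPU or GPU
 * perf tables; otherwise the number of busy CPUs is estimated from the idle
 * time delta and the matching row of temp_limits[] is used. The selected cap
 * is published in temp_limit_freq and enforced via cpufreq_scale_limit().
 */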
static void cpufreq_temp_limit_work_func(struct work_struct *work)
{
	static bool in_perf = false;
	struct cpufreq_policy *policy;
	int temp, i;
	unsigned int new_freq = -1;
	unsigned long delay = HZ / 10; // 100ms
	unsigned int nr_cpus = num_online_cpus();
	const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
	size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);

	temp = get_temp();

	if (sys_state == '1') {
		in_perf = true;
		if (gpu_is_mali400) {
			unsigned int gpu_irqs[2];

			/* sample the GPU interrupt count over a short window to
			 * tell CPU-bound from GPU-bound benchmark load */
			gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
			msleep(40);
			gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
			if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
				limits_table = temp_limits_cpu_perf;
				limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
			} else {
				limits_table = temp_limits_gpu_perf;
				limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
			}
		} else {
			limits_table = temp_limits_cpu_perf;
			limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
		}
	} else if (in_perf) {
		in_perf = false;
	} else {
		static u64 last_time_in_idle = 0;
		static u64 last_time_in_idle_timestamp = 0;
		u64 time_in_idle = 0, now;
		u32 delta_idle, delta_time;
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			time_in_idle += get_cpu_idle_time_us(cpu, &now);
		}
		delta_time = now - last_time_in_idle_timestamp;
		delta_idle = time_in_idle - last_time_in_idle;
		last_time_in_idle = time_in_idle;
		last_time_in_idle_timestamp = now;
		delta_idle += delta_time >> 4; // +6.25%
		if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
			limits_table = temp_limits[0];
		else if (delta_idle > (nr_cpus - 2) * delta_time)
			limits_table = temp_limits[1];
		else if (delta_idle > (nr_cpus - 3) * delta_time)
			limits_table = temp_limits[2];
		FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0]));
	}

	for (i = 0; i < limits_size; i++) {
		if (temp >= limits_table[i].index)
			new_freq = limits_table[i].frequency;
	}

	if (temp_limit_freq != new_freq) {
		unsigned int cur_freq;

		temp_limit_freq = new_freq;
		cur_freq = cpufreq_get_rate(0);
		FREQ_DBG("temp limit %7d KHz cur %7d KHz\n", temp_limit_freq, cur_freq);
		if (cur_freq > temp_limit_freq) {
			policy = cpufreq_cpu_get(0);
			cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
			cpufreq_cpu_put(policy);
		}
	}

	queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);
}

static DECLARE_DELAYED_WORK(cpufreq_temp_limit_work, cpufreq_temp_limit_work_func);
static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val != CPUFREQ_NOTIFY)
		return 0;
	if (cpufreq_is_ondemand(policy)) {
		FREQ_DBG("queue work\n");
		queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
	} else {
		FREQ_DBG("cancel work\n");
		cancel_delayed_work_sync(&cpufreq_temp_limit_work);
	}
	return 0;
}
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy,
};
static void cpufreq_temp_limit_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	struct cpufreq_frequency_table *table;

	/* snap the thermal caps to frequencies that actually exist in freq_table */
	table = temp_limits[0];
	for (i = 0; i < sizeof(temp_limits) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	}
	table = temp_limits_cpu_perf;
	for (i = 0; i < sizeof(temp_limits_cpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	}
	table = temp_limits_gpu_perf;
	for (i = 0; i < sizeof(temp_limits_gpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	}

	misc_register(&sys_state_dev);
	if (cpufreq_is_ondemand(policy)) {
		queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0 * HZ);
	}
	cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
}
static void cpufreq_temp_limit_exit(void)
{
	cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
	cancel_delayed_work(&cpufreq_temp_limit_work);
}
#else
static inline void cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
static inline void cpufreq_temp_limit_exit(void) {}
#endif
static int cpufreq_verify(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}
static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
{
	int ret;
	unsigned int i;
	struct cpufreq_freqs freqs;
	struct cpufreq_policy *policy;

	freqs.new = rate / 1000;
	freqs.old = clk_get_rate(clk) / 1000;

	for_each_online_cpu(freqs.cpu) {
		policy = cpufreq_cpu_get(freqs.cpu);
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		cpufreq_cpu_put(policy);
	}

	FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

	ret = clk_set_rate(clk, rate);

#ifdef CONFIG_SMP
	/*
	 * Note that loops_per_jiffy is not updated on SMP systems in
	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
	 * on frequency transition. We need to update all dependent CPUs.
	 */
	for_each_possible_cpu(i) {
		per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
	}
#endif

	freqs.new = clk_get_rate(clk) / 1000;
	for_each_online_cpu(freqs.cpu) {
		policy = cpufreq_cpu_get(freqs.cpu);
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
		cpufreq_cpu_put(policy);
	}

	return ret;
}
static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
{
	unsigned int i;

	gpu_is_mali400 = cpu_is_rk3188();

	clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
	if (clk_gpu_dvfs_node) {
		clk_enable_dvfs(clk_gpu_dvfs_node);
		if (gpu_is_mali400)
			dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);
	}

	clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
	if (!clk_cpu_dvfs_node) {
		return -EINVAL;
	}
	dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
	freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
	if (freq_table == NULL) {
		freq_table = default_freq_table;
	} else {
		unsigned int v = -1;

		/* pick the lowest voltage at or above suspend_volt as the suspend frequency */
		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
			if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
				suspend_freq = freq_table[i].frequency;
				v = freq_table[i].index;
			}
		}
	}
	low_battery_freq = get_freq_from_table(low_battery_freq);
	clk_enable_dvfs(clk_cpu_dvfs_node);

	freq_wq = alloc_workqueue("cpufreq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
	cpufreq_temp_limit_init(policy);

	printk("cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
	return 0;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
	static int cpu0_err;

	if (policy->cpu == 0)
		cpu0_err = cpufreq_init_cpu0(policy);
	if (cpu0_err)
		return cpu0_err;

	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
	policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
	policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; // make ondemand default sampling_rate 40000

	/*
	 * On SMP configuration, both processors share the voltage and clock,
	 * so both CPUs need to be scaled together and hence need software
	 * co-ordination. Use the cpufreq affected_cpus interface to handle
	 * this scenario. The additional is_smp() check keeps SMP_ON_UP
	 * builds working.
	 */
	if (is_smp())
		cpumask_setall(policy->cpus);

	return 0;
}
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	if (policy->cpu != 0)
		return 0;

	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	clk_put_dvfs_node(clk_cpu_dvfs_node);
	cpufreq_temp_limit_exit();
	if (freq_wq) {
		flush_workqueue(freq_wq);
		destroy_workqueue(freq_wq);
		freq_wq = NULL;
	}
	return 0;
}
static struct freq_attr *cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
//#ifdef CONFIG_POWER_SUPPLY
#ifdef CONFIG_POWER_SUPPLY
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void) { return 100; }
#endif
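
/*
 * Clamp a requested frequency before it is applied: while a dynamic governor
 * is still booting (first 60s after boot) a nearly empty battery caps the
 * request at low_battery_freq, and with thermal limiting enabled the request
 * is additionally bounded by temp_limit_freq.
 */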
static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
{
	bool is_ondemand = cpufreq_is_ondemand(policy);

	if (!is_ondemand)
		return target_freq;

	if (is_booting) {
		s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
		if (boottime_ms > 60 * MSEC_PER_SEC) {
			is_booting = false;
		} else if (target_freq > low_battery_freq &&
			   rk_get_system_battery_capacity() <= low_battery_capacity) {
			target_freq = low_battery_freq;
		}
	}

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
	{
		static unsigned int ondemand_target = 816 * 1000;
		if (is_private)
			target_freq = ondemand_target;
		else
			ondemand_target = target_freq;
	}
	/* If the new frequency exceeds the thermal max allowed frequency, scale down to it. */
	target_freq = min(target_freq, temp_limit_freq);
#endif

	return target_freq;
}
static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
	unsigned int i, new_freq = target_freq, new_rate, cur_rate;
	int ret = 0;
	bool is_private;

	if (!freq_table) {
		FREQ_ERR("no freq table!\n");
		return -EINVAL;
	}

	mutex_lock(&cpufreq_mutex);

	is_private = relation & CPUFREQ_PRIVATE;
	relation &= ~CPUFREQ_PRIVATE;

	if (relation & ENABLE_FURTHER_CPUFREQ)
		no_cpufreq_access--;
	if (no_cpufreq_access) {
		FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
		ret = -EINVAL;
		goto out;
	}
	if (relation & DISABLE_FURTHER_CPUFREQ)
		no_cpufreq_access++;
	relation &= ~MASK_FURTHER_CPUFREQ;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
	if (ret) {
		FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
		goto out;
	}
	new_freq = freq_table[i].frequency;
	if (!no_cpufreq_access)
		new_freq = cpufreq_scale_limit(new_freq, policy, is_private);

	new_rate = new_freq * 1000;
	cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
	FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
	if (new_rate == cur_rate)
		goto out;
	ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);
out:
	FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
	mutex_unlock(&cpufreq_mutex);
	return ret;
}
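
/*
 * On suspend/hibernate the policy is pinned to suspend_freq via
 * DISABLE_FURTHER_CPUFREQ; the matching resume events re-enable scaling
 * with ENABLE_FURTHER_CPUFREQ.
 */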
static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	if (!policy)
		return ret;
	if (!cpufreq_is_ondemand(policy))
		goto out;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
		ret = ret ? NOTIFY_BAD : NOTIFY_OK;
		break;
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
		ret = NOTIFY_OK;
		break;
	}
out:
	cpufreq_cpu_put(policy);
	return ret;
}
static struct notifier_block cpufreq_pm_notifier = {
	.notifier_call = cpufreq_pm_notifier_event,
};
static int cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	if (policy) {
		is_booting = false;
		cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
		cpufreq_cpu_put(policy);
	}
	return NOTIFY_OK;
}
static struct notifier_block cpufreq_reboot_notifier = {
	.notifier_call = cpufreq_reboot_notifier_event,
};
static struct cpufreq_driver cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cpufreq_verify,
	.target = cpufreq_target,
	.get = cpufreq_get_rate,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.attr = cpufreq_attr,
};
static int __init cpufreq_driver_init(void)
{
	register_pm_notifier(&cpufreq_pm_notifier);
	register_reboot_notifier(&cpufreq_reboot_notifier);
	return cpufreq_register_driver(&cpufreq_driver);
}

device_initcall(cpufreq_driver_init);