/*
 * Copyright (C) 2013 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "cpufreq: " fmt

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/miscdevice.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/rockchip/common.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <dt-bindings/clock/rk_system_status.h>
#include "../../../drivers/clk/rockchip/clk-pd.h"
extern void dvfs_disable_temp_limit(void);

/*
 * NOTE(review): the original value of VERSION was lost in extraction;
 * it is printed by cpufreq_init_cpu0() — confirm against upstream ("1.0"
 * in the reference Rockchip driver).
 */
#define VERSION "1.0"

/*
 * Debug/trace macros.  The original source defined FREQ_DBG/FREQ_LOG twice;
 * the intervening #ifdef/#else/#endif lines were lost — restored here so the
 * pr_debug variants are compiled in only when CPU_FREQ_DEBUG is set.
 */
#ifdef CPU_FREQ_DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
52 /* Frequency table index must be sequential starting at 0 */
53 static struct cpufreq_frequency_table default_freq_table[] = {
54 {.frequency = 312 * 1000, .index = 875 * 1000},
55 {.frequency = 504 * 1000, .index = 925 * 1000},
56 {.frequency = 816 * 1000, .index = 975 * 1000},
57 {.frequency = 1008 * 1000, .index = 1075 * 1000},
58 {.frequency = 1200 * 1000, .index = 1150 * 1000},
59 {.frequency = 1416 * 1000, .index = 1250 * 1000},
60 {.frequency = 1608 * 1000, .index = 1350 * 1000},
61 {.frequency = CPUFREQ_TABLE_END},
63 static struct cpufreq_frequency_table *freq_table = default_freq_table;
64 /*********************************************************/
65 /* additional symantics for "relation" in cpufreq with pm */
66 #define DISABLE_FURTHER_CPUFREQ 0x10
67 #define ENABLE_FURTHER_CPUFREQ 0x20
68 #define MASK_FURTHER_CPUFREQ 0x30
69 /* With 0x00(NOCHANGE), it depends on the previous "further" status */
70 #define CPUFREQ_PRIVATE 0x100
71 static unsigned int no_cpufreq_access = 0;
72 static unsigned int suspend_freq = 816 * 1000;
73 static unsigned int suspend_volt = 1100000;
74 static unsigned int low_battery_freq = 600 * 1000;
75 static unsigned int low_battery_capacity = 5; // 5%
76 static bool is_booting = true;
77 static DEFINE_MUTEX(cpufreq_mutex);
78 static bool gpu_is_mali400;
79 struct dvfs_node *clk_cpu_dvfs_node = NULL;
80 struct dvfs_node *clk_gpu_dvfs_node = NULL;
81 struct dvfs_node *aclk_vio1_dvfs_node = NULL;
82 struct dvfs_node *clk_ddr_dvfs_node = NULL;
83 /*******************************************************/
84 static unsigned int cpufreq_get_rate(unsigned int cpu)
86 if (clk_cpu_dvfs_node)
87 return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
92 static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
95 if (policy && policy->governor)
96 c = policy->governor->name[0];
97 return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
100 static unsigned int get_freq_from_table(unsigned int max_freq)
103 unsigned int target_freq = 0;
104 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
105 unsigned int freq = freq_table[i].frequency;
106 if (freq <= max_freq && target_freq < freq) {
111 target_freq = max_freq;
115 static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
117 static unsigned int min_rate=0, max_rate=-1;
118 struct cpufreq_policy *policy = data;
120 if (val != CPUFREQ_ADJUST)
123 if (cpufreq_is_ondemand(policy)) {
124 FREQ_DBG("queue work\n");
125 dvfs_clk_enable_limit(clk_cpu_dvfs_node, min_rate, max_rate);
127 FREQ_DBG("cancel work\n");
128 dvfs_clk_get_limit(clk_cpu_dvfs_node, &min_rate, &max_rate);
134 static struct notifier_block notifier_policy_block = {
135 .notifier_call = cpufreq_notifier_policy
138 static int cpufreq_verify(struct cpufreq_policy *policy)
142 return cpufreq_frequency_table_verify(policy, freq_table);
145 static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
148 struct cpufreq_freqs freqs;
149 struct cpufreq_policy *policy;
151 freqs.new = rate / 1000;
152 freqs.old = clk_get_rate(clk) / 1000;
154 for_each_online_cpu(freqs.cpu) {
155 policy = cpufreq_cpu_get(freqs.cpu);
156 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
157 cpufreq_cpu_put(policy);
160 FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
162 ret = clk_set_rate(clk, rate);
164 freqs.new = clk_get_rate(clk) / 1000;
166 for_each_online_cpu(freqs.cpu) {
167 policy = cpufreq_cpu_get(freqs.cpu);
168 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
169 cpufreq_cpu_put(policy);
176 static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
180 struct regulator *vdd_gpu_regulator;
182 gpu_is_mali400 = cpu_is_rk3188();
184 clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
185 if (clk_gpu_dvfs_node){
186 clk_enable_dvfs(clk_gpu_dvfs_node);
187 vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
188 if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
189 if (!regulator_is_enabled(vdd_gpu_regulator)) {
190 ret = regulator_enable(vdd_gpu_regulator);
191 arm_pm_restart('h', NULL);
193 /* make sure vdd_gpu_regulator is in use,
194 so it will not be disable by regulator_init_complete*/
195 ret = regulator_enable(vdd_gpu_regulator);
197 arm_pm_restart('h', NULL);
200 dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);
203 clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
204 if (clk_ddr_dvfs_node){
205 clk_enable_dvfs(clk_ddr_dvfs_node);
208 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
209 if (!clk_cpu_dvfs_node){
212 dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
213 freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
214 if (freq_table == NULL) {
215 freq_table = default_freq_table;
218 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
219 if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
220 suspend_freq = freq_table[i].frequency;
221 v = freq_table[i].index;
225 low_battery_freq = get_freq_from_table(low_battery_freq);
226 clk_enable_dvfs(clk_cpu_dvfs_node);
228 cpufreq_register_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
230 printk("cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
234 static int cpufreq_init(struct cpufreq_policy *policy)
238 if (policy->cpu == 0) {
239 cpu0_err = cpufreq_init_cpu0(policy);
246 cpufreq_frequency_table_cpuinfo(policy, freq_table);
248 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
251 policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
253 policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC; // make ondemand default sampling_rate to 40000
255 cpumask_setall(policy->cpus);
261 static int cpufreq_exit(struct cpufreq_policy *policy)
263 if (policy->cpu != 0)
266 cpufreq_frequency_table_cpuinfo(policy, freq_table);
267 clk_put_dvfs_node(clk_cpu_dvfs_node);
268 cpufreq_unregister_notifier(¬ifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
273 static struct freq_attr *cpufreq_attr[] = {
274 &cpufreq_freq_attr_scaling_available_freqs,
/*
 * Battery capacity source: real charger driver when available, otherwise
 * a stub reporting 100% so the low-battery boot cap never engages.
 * (The #else/#endif were lost in extraction — without them the extern and
 * the static definition collide; restored.)
 */
#ifdef CONFIG_CHARGER_DISPLAY
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void) { return 100; }
#endif
284 static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
286 bool is_ondemand = cpufreq_is_ondemand(policy);
292 s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
293 if (boottime_ms > 60 * MSEC_PER_SEC) {
295 } else if (target_freq > low_battery_freq &&
296 rk_get_system_battery_capacity() <= low_battery_capacity) {
297 target_freq = low_battery_freq;
304 static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
306 unsigned int i, new_freq = target_freq, new_rate, cur_rate;
311 FREQ_ERR("no freq table!\n");
315 mutex_lock(&cpufreq_mutex);
317 is_private = relation & CPUFREQ_PRIVATE;
318 relation &= ~CPUFREQ_PRIVATE;
320 if ((relation & ENABLE_FURTHER_CPUFREQ) && no_cpufreq_access)
322 if (no_cpufreq_access) {
323 FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
327 if (relation & DISABLE_FURTHER_CPUFREQ)
329 relation &= ~MASK_FURTHER_CPUFREQ;
331 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
333 FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
336 new_freq = freq_table[i].frequency;
337 if (!no_cpufreq_access)
338 new_freq = cpufreq_scale_limit(new_freq, policy, is_private);
340 new_rate = new_freq * 1000;
341 cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
342 FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
343 if (new_rate == cur_rate)
345 ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);
348 FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
349 mutex_unlock(&cpufreq_mutex);
354 static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
356 int ret = NOTIFY_DONE;
357 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
362 if (!cpufreq_is_ondemand(policy))
366 case PM_SUSPEND_PREPARE:
368 ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
375 case PM_POST_RESTORE:
376 case PM_POST_SUSPEND:
377 //if (target_freq == policy->cur) then cpufreq_driver_target
378 //will return, and our target will not be called, it casue
379 //ENABLE_FURTHER_CPUFREQ flag invalid, avoid that.
381 cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
386 cpufreq_cpu_put(policy);
390 static struct notifier_block cpufreq_pm_notifier = {
391 .notifier_call = cpufreq_pm_notifier_event,
394 int rockchip_cpufreq_reboot_limit_freq(void)
396 struct regulator *regulator;
400 dvfs_disable_temp_limit();
401 dvfs_clk_enable_limit(clk_cpu_dvfs_node, 1000*suspend_freq, 1000*suspend_freq);
403 rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
404 regulator = dvfs_get_regulator("vdd_arm");
406 volt = regulator_get_voltage(regulator);
408 pr_info("cpufreq: get arm regulator failed\n");
409 pr_info("cpufreq: reboot set core rate=%lu, volt=%d\n",
410 dvfs_clk_get_rate(clk_cpu_dvfs_node), volt);
415 static int cpufreq_reboot_notifier_event(struct notifier_block *this,
416 unsigned long event, void *ptr)
418 rockchip_set_system_status(SYS_STATUS_REBOOT);
419 rockchip_cpufreq_reboot_limit_freq();
424 static struct notifier_block cpufreq_reboot_notifier = {
425 .notifier_call = cpufreq_reboot_notifier_event,
428 static int clk_pd_vio_notifier_call(struct notifier_block *nb, unsigned long event, void *ptr)
431 case RK_CLK_PD_PREPARE:
432 if (aclk_vio1_dvfs_node)
433 clk_enable_dvfs(aclk_vio1_dvfs_node);
435 case RK_CLK_PD_UNPREPARE:
436 if (aclk_vio1_dvfs_node)
437 clk_disable_dvfs(aclk_vio1_dvfs_node);
443 static struct notifier_block clk_pd_vio_notifier = {
444 .notifier_call = clk_pd_vio_notifier_call,
447 static struct cpufreq_driver cpufreq_driver = {
448 .flags = CPUFREQ_CONST_LOOPS,
449 .verify = cpufreq_verify,
450 .target = cpufreq_target,
451 .get = cpufreq_get_rate,
452 .init = cpufreq_init,
453 .exit = cpufreq_exit,
455 .attr = cpufreq_attr,
458 static int __init cpufreq_driver_init(void)
462 clk = clk_get(NULL, "pd_vio");
464 rk_clk_pd_notifier_register(clk, &clk_pd_vio_notifier);
465 aclk_vio1_dvfs_node = clk_get_dvfs_node("aclk_vio1");
466 if (aclk_vio1_dvfs_node && __clk_is_enabled(clk)){
467 clk_enable_dvfs(aclk_vio1_dvfs_node);
470 register_reboot_notifier(&cpufreq_reboot_notifier);
471 register_pm_notifier(&cpufreq_pm_notifier);
472 return cpufreq_register_driver(&cpufreq_driver);
475 device_initcall(cpufreq_driver_init);