1 /* arch/arm/mach-rk30/rk30_dvfs.c
3 * Copyright (C) 2012 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
/*
 * Module-wide DVFS state: the voltage-domain list and its lock, the
 * deferred-work queue, cached dvfs_node handles for the cpu/gpu clocks,
 * and the mutex serializing vdd_gpu enable/disable transitions.
 * NOTE(review): vdd_gpu_regulator is not static — presumably shared with
 * another translation unit; confirm before narrowing linkage.
 */
32 #define MHz (1000 * 1000)
33 static LIST_HEAD(rk_dvfs_tree)
34 static DEFINE_MUTEX(rk_dvfs_mutex);
35 static struct workqueue_struct *dvfs_wq;
36 static struct dvfs_node *clk_cpu_b_dvfs_node;
37 static struct dvfs_node *clk_cpu_l_dvfs_node;
38 static struct dvfs_node *clk_cpu_bl_dvfs_node;
39 static struct dvfs_node *clk_cpu_dvfs_node;
40 static struct dvfs_node *clk_gpu_dvfs_node;
41 static int pd_gpu_off, early_suspend;
42 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
43 struct regulator *vdd_gpu_regulator;
/*
 * dvfs_get_temp - read the temperature of thermal channel @chn.
 *
 * With CONFIG_ROCKCHIP_THERMAL the current regulator voltage is read back
 * under the vd mutex and handed to the tsadc driver together with the
 * channel; otherwise only the raw channel temperature is read.
 * Starts from INVALID_TEMP so callers can detect a failed read.
 * NOTE(review): this listing elides several lines (opening brace, guard
 * return, #else/#endif) — verify against the full file.
 */
45 static int dvfs_get_temp(int chn)
47 int temp = INVALID_TEMP;
49 #if IS_ENABLED(CONFIG_ROCKCHIP_THERMAL)
/* Bail out when the big/LITTLE cpu node or its regulator is unusable. */
52 if (clk_cpu_bl_dvfs_node == NULL ||
53 IS_ERR_OR_NULL(clk_cpu_bl_dvfs_node->vd->regulator))
/* vd mutex guards the regulator voltage read-back. */
56 mutex_lock(&clk_cpu_bl_dvfs_node->vd->mutex);
57 read_back = dvfs_regulator_get_voltage(
58 clk_cpu_bl_dvfs_node->vd->regulator);
59 temp = rockchip_tsadc_get_temp(chn, read_back);
60 mutex_unlock(&clk_cpu_bl_dvfs_node->vd->mutex);
/* Non-thermal config: temperature from channel alone. */
62 temp = rockchip_tsadc_get_temp(chn);
/*
 * vdd_gpu_reboot_notifier_event - reboot hook: force vdd_gpu on.
 *
 * Ensures the GPU supply is enabled before the system reboots so the
 * next boot does not start with the rail off.  The enable is done under
 * switch_vdd_gpu_mutex and skipped when the rail is already on.
 */
68 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
69 unsigned long event, void *ptr)
73 DVFS_DBG("%s: enable vdd_gpu\n", __func__);
74 mutex_lock(&switch_vdd_gpu_mutex);
75 if (!regulator_is_enabled(vdd_gpu_regulator))
76 ret = regulator_enable(vdd_gpu_regulator);
77 mutex_unlock(&switch_vdd_gpu_mutex);
/* Registered with the reboot notifier chain at init (not shown here). */
82 static struct notifier_block vdd_gpu_reboot_notifier = {
83 .notifier_call = vdd_gpu_reboot_notifier_event,
/*
 * clk_pd_gpu_notifier_call - tie vdd_gpu to the GPU power-domain state.
 *
 * RK_CLK_PD_PREPARE: enable the GPU supply before the domain powers up.
 * RK_CLK_PD_UNPREPARE: disable it once the domain is released.
 * Both transitions are serialized by switch_vdd_gpu_mutex and are no-ops
 * when the rail is already in the requested state.
 */
86 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
87 unsigned long event, void *ptr)
92 case RK_CLK_PD_PREPARE:
93 mutex_lock(&switch_vdd_gpu_mutex);
96 if (!regulator_is_enabled(vdd_gpu_regulator))
97 ret = regulator_enable(vdd_gpu_regulator);
99 mutex_unlock(&switch_vdd_gpu_mutex);
101 case RK_CLK_PD_UNPREPARE:
102 mutex_lock(&switch_vdd_gpu_mutex);
105 if (regulator_is_enabled(vdd_gpu_regulator))
106 ret = regulator_disable(vdd_gpu_regulator);
108 mutex_unlock(&switch_vdd_gpu_mutex);
/* Hooked into the rockchip clk-pd notifier chain. */
117 static struct notifier_block clk_pd_gpu_notifier = {
118 .notifier_call = clk_pd_gpu_notifier_call,
/*
 * early_suspend_notifier_call - framebuffer blank hook for vdd_gpu.
 *
 * On FB_EARLY_EVENT_BLANK/UNBLANK the GPU rail is re-enabled before the
 * display comes back; on FB_EVENT_BLANK/POWERDOWN it is disabled after
 * the display has gone down.  All switching happens under
 * switch_vdd_gpu_mutex.  blank_mode is decoded from event->data.
 */
122 static int early_suspend_notifier_call(struct notifier_block *self,
123 unsigned long action, void *data)
125 struct fb_event *event = data;
126 int blank_mode = *((int *)event->data);
129 mutex_lock(&switch_vdd_gpu_mutex);
130 if (action == FB_EARLY_EVENT_BLANK) {
131 switch (blank_mode) {
132 case FB_BLANK_UNBLANK:
/* Resume path: power the rail before unblank completes. */
135 if (!regulator_is_enabled(vdd_gpu_regulator))
136 ret = regulator_enable(
143 } else if (action == FB_EVENT_BLANK) {
144 switch (blank_mode) {
145 case FB_BLANK_POWERDOWN:
/* Suspend path: drop the rail once the panel is powered down. */
148 if (regulator_is_enabled(vdd_gpu_regulator))
149 ret = regulator_disable(
158 mutex_unlock(&switch_vdd_gpu_mutex);
/* Registered with the fb notifier chain (registration not shown). */
163 static struct notifier_block early_suspend_notifier = {
164 .notifier_call = early_suspend_notifier_call,
/*
 * DVFS-local regulator-mode encoding (1..4) used by the DT tables.
 * It deliberately mirrors REGULATOR_MODE_* but with small dense values
 * suitable for storage in cpufreq_frequency_table.index.
 */
167 #define DVFS_REGULATOR_MODE_STANDBY 1
168 #define DVFS_REGULATOR_MODE_IDLE 2
169 #define DVFS_REGULATOR_MODE_NORMAL 3
170 #define DVFS_REGULATOR_MODE_FAST 4
/* Human-readable name for a DVFS mode value (for debug output). */
172 static const char* dvfs_regu_mode_to_string(unsigned int mode)
175 case DVFS_REGULATOR_MODE_FAST:
177 case DVFS_REGULATOR_MODE_NORMAL:
179 case DVFS_REGULATOR_MODE_IDLE:
181 case DVFS_REGULATOR_MODE_STANDBY:
/* Map a DVFS mode value to the regulator framework's REGULATOR_MODE_*. */
188 static int dvfs_regu_mode_convert(unsigned int mode)
191 case DVFS_REGULATOR_MODE_FAST:
192 return REGULATOR_MODE_FAST;
193 case DVFS_REGULATOR_MODE_NORMAL:
194 return REGULATOR_MODE_NORMAL;
195 case DVFS_REGULATOR_MODE_IDLE:
196 return REGULATOR_MODE_IDLE;
197 case DVFS_REGULATOR_MODE_STANDBY:
198 return REGULATOR_MODE_STANDBY;
/* Inverse mapping: REGULATOR_MODE_* back to the DVFS encoding. */
204 static int dvfs_regu_mode_deconvert(unsigned int mode)
207 case REGULATOR_MODE_FAST:
208 return DVFS_REGULATOR_MODE_FAST;
209 case REGULATOR_MODE_NORMAL:
210 return DVFS_REGULATOR_MODE_NORMAL;
211 case REGULATOR_MODE_IDLE:
212 return DVFS_REGULATOR_MODE_IDLE;
213 case REGULATOR_MODE_STANDBY:
214 return DVFS_REGULATOR_MODE_STANDBY;
/*
 * of_get_regu_mode_table - parse the "regu-mode-table" DT property.
 *
 * The property is a flat u32 list of <freq-kHz mode> pairs; it is turned
 * into a kzalloc'd cpufreq_frequency_table (frequency stored in Hz-ish
 * units via *1000, mode in .index) terminated by CPUFREQ_TABLE_END.
 * The last listed frequency must be 0 (catch-all entry) or the table is
 * rejected and freed.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 * Caller owns (and must kfree) the returned table.
 */
220 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
222 struct cpufreq_frequency_table *regu_mode_table = NULL;
223 const struct property *prop;
227 prop = of_find_property(dev_node, "regu-mode-table", NULL);
/* Property length must describe whole <freq mode> pairs. */
233 nr = prop->length / sizeof(u32);
235 pr_err("%s: Invalid freq list\n", __func__);
/* nr/2 entries plus the CPUFREQ_TABLE_END sentinel. */
239 regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
240 (nr/2+1), GFP_KERNEL);
241 if (!regu_mode_table) {
242 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
243 return ERR_PTR(-ENOMEM);
/* DT cells are big-endian; frequencies are scaled from kHz. */
248 for (i=0; i<nr/2; i++){
249 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
250 regu_mode_table[i].index = be32_to_cpup(val++);
253 if (regu_mode_table[i-1].frequency != 0) {
254 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
255 kfree(regu_mode_table);
259 regu_mode_table[i].index = 0;
260 regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
262 return regu_mode_table;
/*
 * dvfs_regu_mode_table_constrain - validate the parsed mode table against
 * the actual regulator's capabilities.
 *
 * Each entry's mode is converted to REGULATOR_MODE_* and checked with
 * regulator_is_supported_mode(); an unsupported mode drops the whole
 * table (freed, pointer NULLed).  If the regulator rounds the mode to a
 * different supported one, the table entry is rewritten in place.
 */
265 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
268 int mode, convert_mode, valid_mode;
273 if (!clk_dvfs_node->regu_mode_table)
276 if (!clk_dvfs_node->vd)
279 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
282 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
283 mode = clk_dvfs_node->regu_mode_table[i].index;
284 convert_mode = dvfs_regu_mode_convert(mode);
286 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
289 DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
/* Unsupported mode invalidates the whole table. */
291 kfree(clk_dvfs_node->regu_mode_table);
292 clk_dvfs_node->regu_mode_table = NULL;
/* regulator_is_supported_mode() may have rounded convert_mode. */
296 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
297 if (valid_mode != mode) {
298 DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
299 __func__, mode, valid_mode);
300 clk_dvfs_node->regu_mode_table[i].index = valid_mode;
/*
 * clk_dvfs_node_get_regu_mode - pick the regulator mode for @rate.
 * The mode table is ordered so the first entry whose frequency is <=
 * the requested rate wins (rate >= entry.frequency).
 */
308 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
309 unsigned long rate, unsigned int *mode)
314 if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
317 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
318 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
319 *mode = clk_dvfs_node->regu_mode_table[i].index;
/*
 * dvfs_pd_get_newmode_byclk - highest mode demanded by any enabled clock
 * in the power domain.  Fast path: if the changed clock already demands
 * at least the pd's current mode, use it directly.
 */
327 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
329 unsigned int mode_max = 0;
332 if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
333 return clk_dvfs_node->regu_mode;
336 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
337 if (clk_dvfs_node->regu_mode_en)
338 mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
/* Propagate the clock's new mode into its power domain. */
344 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
351 pd = clk_dvfs_node->pd;
355 pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
/* Highest mode across all power domains of a voltage domain. */
358 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
360 unsigned int mode_max_vd = 0;
366 list_for_each_entry(pd, &vd->pd_list, node) {
367 mode_max_vd = max(mode_max_vd, pd->regu_mode);
/* Update pd mode from the clock, then resolve the vd-wide mode. */
373 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
378 dvfs_update_clk_pds_mode(clk_dvfs_node);
380 return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
/*
 * dvfs_regu_set_mode - apply a DVFS mode to the vd's regulator.
 * Converts to REGULATOR_MODE_*, calls dvfs_regulator_set_mode(), and
 * records the new mode in vd->regu_mode on success.  Errors on a bad
 * vd, an unconvertible mode, or a missing regulator.
 */
383 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
389 if (IS_ERR_OR_NULL(vd)) {
390 DVFS_ERR("%s: vd_node error\n", __func__);
394 DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
396 convert_mode = dvfs_regu_mode_convert(mode);
397 if (convert_mode < 0) {
398 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
402 if (!IS_ERR_OR_NULL(vd->regulator)) {
403 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
405 DVFS_ERR("%s: %s set mode %d 1(was %d) error!\n", __func__,
406 vd->regulator_name, mode, vd->regu_mode);
410 DVFS_ERR("%s: invalid regulator\n", __func__);
/* Only cache the mode after the hardware accepted it. */
414 vd->regu_mode = mode;
/*
 * dvfs_regu_mode_target - recompute and apply the regulator mode implied
 * by running this clock at @rate: look the mode up in the clock's table,
 * fold it through pd and vd aggregation, then program the regulator.
 * No-op when regu_mode_en is not set for the clock.
 */
419 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
428 if (!clk_dvfs_node->regu_mode_en)
431 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
433 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
434 __func__, clk_dvfs_node->name, rate);
437 clk_dvfs_node->regu_mode = mode;
439 mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
443 ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
/*
 * dvfs_volt_up_delay - busy-wait long enough for a voltage *rise* to
 * settle before raising the clock rate.  Downward changes return
 * immediately.  The delay comes from regulator_set_voltage_time() when
 * the regulator supports it (volt_time_flag > 0); otherwise a default
 * ramp of ~(delta_uV >> 9) microseconds is assumed.  Delays of 1 ms or
 * more are split into mdelay+udelay and flagged with a warning.
 */
448 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
452 if(new_volt <= old_volt)
454 if(vd->volt_time_flag > 0)
455 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
458 if(u_time < 0) {/* regulator cannot report ramp time; use default */
459 DVFS_DBG("%s:vd %s is not suported getting delay time,so we use default\n",
461 u_time = ((new_volt) - (old_volt)) >> 9;
464 DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n",
465 __func__, vd->name, old_volt, new_volt, u_time);
467 if (u_time >= 1000) {
468 mdelay(u_time / 1000);
469 udelay(u_time % 1000);
470 DVFS_WARNING("%s: regulator set vol delay is larger 1ms,old is %d,new is %d\n",
471 __func__, old_volt, new_volt);
/*
 * dvfs_regulator_set_voltage_readback - set the voltage, and if the set
 * call reports failure, read the rail back to distinguish "failed but
 * already effective" from "failed and not applied".
 * NOTE(review): both min and max are passed as max_uV — looks intentional
 * (exact-voltage request) but verify against the full file.
 */
477 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
479 int ret = 0, read_back = 0;
481 ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
483 DVFS_ERR("%s: now read back to check voltage\n", __func__);
485 /* read back to judge if it is already effect */
487 read_back = dvfs_regulator_get_voltage(regulator);
488 if (read_back == max_uV) {
489 DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
492 DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
/*
 * dvfs_scale_volt_direct - program the vd's regulator to volt_new.
 * On success caches cur_volt and marks DVFS_SET_VOLT_SUCCESS; on failure
 * marks DVFS_SET_VOLT_FAILURE.  A post-set settling delay is inserted
 * for upward changes via dvfs_volt_up_delay().
 */
499 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
503 DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
505 if (IS_ERR_OR_NULL(vd_clk)) {
506 DVFS_ERR("%s: vd_node error\n", __func__);
510 if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
511 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
512 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
514 vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
515 DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
516 __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
521 DVFS_ERR("%s: invalid regulator\n", __func__);
525 vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
526 vd_clk->cur_volt = volt_new;
/*
 * dvfs_reset_volt - recover after a failed voltage set: read the actual
 * rail voltage back and resynchronize vd->cur_volt with it.  Returns the
 * refreshed cur_volt, or an error when the read-back itself fails.
 */
532 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
534 int flag_set_volt_correct = 0;
535 if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
536 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
538 DVFS_ERR("%s: invalid regulator\n", __func__);
541 if (flag_set_volt_correct <= 0) {
542 DVFS_ERR("%s (vd:%s), try to reload volt ,by it is error again(%d)!!! stop scaling\n",
543 __func__, dvfs_vd->name, flag_set_volt_correct);
546 dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
547 DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
548 __func__, dvfs_vd->name, flag_set_volt_correct);
550 /* Reset vd's voltage */
551 dvfs_vd->cur_volt = flag_set_volt_correct;
553 return dvfs_vd->cur_volt;
557 /* for clk enable case to get vd regulator info */
/*
 * clk_enable_dvfs_regulator_check - snapshot the rail voltage when a
 * dvfs clock is first enabled, and record success/failure in
 * vd->volt_set_flag (a non-positive read-back marks failure).
 */
558 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
560 vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
561 if(vd->cur_volt <= 0){
562 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
564 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/*
 * dvfs_get_vd_regulator_volt_list - cache the regulator's selectable
 * voltages into vd->volt_list, capped at VD_VOL_LIST_CNT entries.
 * Non-positive selector voltages are skipped (see commented warning).
 */
567 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
569 unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
570 int n = 0, sel_volt = 0;
572 if(selector > VD_VOL_LIST_CNT)
573 selector = VD_VOL_LIST_CNT;
575 for (i = 0; i < selector; i++) {
576 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
578 //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
579 // __func__, vd->name, i, sel_volt);
582 vd->volt_list[n++] = sel_volt;
583 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n",
584 __func__, vd->name, i, n, sel_volt);
/*
 * vd_regulator_round_volt_max - round @volt to a voltage the regulator
 * can actually produce, rounding *up* (first list entry >= volt);
 * skips invalid (<=0) cached entries.
 */
591 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
596 for (i = 0; i < vd->n_voltages; i++) {
597 sel_volt = vd->volt_list[i];
599 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
/*
 * vd_regulator_round_volt_min - same lookup rounding *down*: returns the
 * last list entry below the requested voltage.
 */
610 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
615 for (i = 0; i < vd->n_voltages; i++) {
616 sel_volt = vd->volt_list[i];
618 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
624 return vd->volt_list[i-1];
/* Dispatch on rounding direction: VD_LIST_RELATION_L -> down, else up. */
634 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
638 if(flags == VD_LIST_RELATION_L)
639 return vd_regulator_round_volt_min(vd, volt);
641 return vd_regulator_round_volt_max(vd, volt);
/*
 * dvfs_table_round_volt - rewrite each dvfs-table voltage (.index) to a
 * value the regulator supports, rounding up (VD_LIST_RELATION_H) so the
 * chip never runs below the table voltage.  Skipped when table/vd/
 * regulator are missing.
 */
644 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
648 if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd ||
649 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
652 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
654 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
657 DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
658 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
661 DVFS_DBG("clk %s:round_volt %d to %d\n",
662 clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
664 clk_dvfs_node->dvfs_table[i].index=test_volt;
/*
 * dvfs_vd_get_regulator_volt_time_info - probe whether the regulator can
 * report voltage ramp times.  A +200 mV test query is issued once; the
 * result (us for 200 mV, or negative for "unsupported") is cached in
 * vd->volt_time_flag and later consulted by dvfs_volt_up_delay().
 */
668 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
670 if(vd->volt_time_flag <= 0){/* probe only once */
671 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
672 if(vd->volt_time_flag < 0){
673 DVFS_DBG("%s,vd %s volt_time is no support\n",
677 DVFS_DBG("%s,vd %s volt_time is support,up 200mv need delay %d us\n",
678 __func__, vd->name, vd->volt_time_flag);
/*
 * dvfs_vd_get_regulator_mode_info - probe whether regulator mode control
 * works: read the current mode, and if it is a recognised REGULATOR_MODE_*
 * value, try writing it back.  vd->mode_flag caches the result (0 means
 * re-probe later).
 */
683 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
685 //REGULATOR_MODE_FAST
686 if(vd->mode_flag <= 0){/* probe only once */
687 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
688 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
689 || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
691 if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
692 vd->mode_flag = 0;/* write-back failed: check again later */
695 if(vd->mode_flag > 0){
696 DVFS_DBG("%s,vd %s mode(now is %d) support\n",
697 __func__, vd->name, vd->mode_flag);
700 DVFS_DBG("%s,vd %s mode is not support now check\n",
/*
 * dvfs_get_regulator - public lookup of a regulator by name across all
 * registered voltage domains.  Walks rk_dvfs_tree under rk_dvfs_mutex
 * and returns the matching vd's regulator handle.
 */
707 struct regulator *dvfs_get_regulator(char *regulator_name)
711 mutex_lock(&rk_dvfs_mutex);
712 list_for_each_entry(vd, &rk_dvfs_tree, node) {
713 if (strcmp(regulator_name, vd->regulator_name) == 0) {
714 mutex_unlock(&rk_dvfs_mutex);
715 return vd->regulator;
718 mutex_unlock(&rk_dvfs_mutex);
/*
 * dvfs_get_rate_range - derive min_rate/max_rate (in Hz) from the node's
 * dvfs table.  Table frequencies are kHz; /1000*1000*1000 truncates to
 * whole MHz then scales to Hz.  Assumes ascending table order: the last
 * entry becomes max_rate, the first min_rate.
 */
722 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
724 struct cpufreq_frequency_table *table;
730 clk_dvfs_node->min_rate = 0;
731 clk_dvfs_node->max_rate = 0;
733 table = clk_dvfs_node->dvfs_table;
734 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
735 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
737 clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
740 DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
741 __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * dvfs_table_round_clk_rate - snap each table frequency to a rate the
 * clock tree can actually produce.  Table entries encode extra DDR flags
 * in the low 3 decimal digits (freq = real_khz + flags), so the flags
 * are split off, the real rate is rounded via __clk_round_rate(), padded
 * up to a whole MHz, and the flags are re-attached.
 */
746 static void dvfs_table_round_clk_rate(struct dvfs_node *clk_dvfs_node)
748 int i, rate, temp_rate, flags;
750 if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
753 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
754 //ddr rate = real rate+flags
755 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
756 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
757 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
759 DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
760 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
764 /* Set rate unit as MHZ */
765 if (temp_rate % MHz != 0)
766 temp_rate = (temp_rate / MHz + 1) * MHz;
768 temp_rate = (temp_rate / 1000) + flags;
770 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
771 clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
773 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;
/*
 * clk_dvfs_node_get_ref_volt - find the table entry (freq + voltage) for
 * the smallest table frequency >= rate_khz.  On overflow past the table
 * end, clk_fv->frequency is zeroed and the call fails.  rate_khz == 0
 * short-circuits with clk_fv->frequency = rate_khz.
 */
777 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
778 struct cpufreq_frequency_table *clk_fv)
782 if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
786 clk_fv->frequency = rate_khz;
789 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
790 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
791 clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
792 clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
793 //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
794 //clk_fv->frequency, clk_fv->index);
798 clk_fv->frequency = 0;
800 //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
/*
 * dvfs_pd_get_newvolt_byclk - highest voltage demanded by any enabled
 * clock in the power domain (mirrors dvfs_pd_get_newmode_byclk).
 * Fast path: if the changed clock is enabled and already demands at
 * least the pd's current voltage, return its set_volt directly.
 */
804 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
808 if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
809 return clk_dvfs_node->set_volt;
812 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
813 if (clk_dvfs_node->enable_count)
814 volt_max = max(volt_max, clk_dvfs_node->set_volt);
/* Fold a clock's new voltage demand into its power domain. */
819 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
826 pd = clk_dvfs_node->pd;
830 pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
/* Highest voltage across all power domains of a voltage domain. */
833 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
841 list_for_each_entry(pd, &vd->pd_list, node) {
842 volt_max_vd = max(volt_max_vd, pd->cur_volt);
/* Update pd voltage from the clock, then resolve the vd-wide voltage. */
848 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
853 dvfs_update_clk_pds_volt(clk_dvfs_node);
854 return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
/*
 * dvfs_temp_limit_work_func - periodic (100 ms) thermal-throttle worker.
 * Re-queues itself first, then walks every vd/pd/clk with a
 * temp_limit_table, refreshes its temperature and re-runs the node's
 * dvfs target at last_set_rate so the new limit takes effect.
 * NOTE(review): a second, different dvfs_temp_limit_work_func appears
 * later in this listing (orig. line 1224) — in the full file these are
 * presumably alternative config-guarded variants; confirm before editing.
 */
858 static void dvfs_temp_limit_work_func(struct work_struct *work)
860 unsigned long delay = HZ / 10; /* 100ms */
863 struct dvfs_node *clk_dvfs_node;
/* Re-arm on CPU0 before doing the scan. */
865 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
867 mutex_lock(&rk_dvfs_mutex);
868 list_for_each_entry(vd, &rk_dvfs_tree, node) {
869 mutex_lock(&vd->mutex);
870 list_for_each_entry(pd, &vd->pd_list, node) {
871 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
872 if (clk_dvfs_node->temp_limit_table) {
873 clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
874 clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
878 mutex_unlock(&vd->mutex);
880 mutex_unlock(&rk_dvfs_mutex);
/*
 * Per-silicon-revision PVTM calibration data for RK3288 (v0/v1/v2).
 * Each *_pvtm_table maps a CPU frequency (kHz, .frequency) to the minimum
 * PVTM oscillator count (.index) needed at that frequency; the *_pvtm_info
 * wraps it with scan parameters and voltage limits used by
 * pvtm_set_dvfs_table() to derive per-chip voltages at runtime.
 */
884 static struct cpufreq_frequency_table rk3288v0_arm_pvtm_table[] = {
885 {.frequency = 216000, .index = 4006},
886 {.frequency = 408000, .index = 6518},
887 {.frequency = 600000, .index = 8345},
888 {.frequency = 816000, .index = 11026},
889 {.frequency = 1008000, .index = 12906},
890 {.frequency = 1200000, .index = 15532},
891 {.frequency = 1416000, .index = 18076},
892 {.frequency = 1608000, .index = 21282},
893 {.frequency = CPUFREQ_TABLE_END, .index = 1},
896 static struct pvtm_info rk3288v0_arm_pvtm_info = {
897 .compatible = "rockchip,rk3288",
898 .pvtm_table = rk3288v0_arm_pvtm_table,
899 .channel = ARM_DVFS_CH,
900 .process_version = RK3288_PROCESS_V0,
901 .scan_rate_hz = 216000000,
902 .sample_time_us = 1000,
903 .volt_step_uv = 12500,
904 .delta_pvtm_by_volt = 400,
905 .delta_pvtm_by_temp = 14,
906 .volt_margin_uv = 25000,
907 .min_volt_uv = 850000,
908 .max_volt_uv = 1400000,
911 static struct cpufreq_frequency_table rk3288v1_arm_pvtm_table[] = {
912 {.frequency = 216000, .index = 4710},
913 {.frequency = 408000, .index = 7200},
914 {.frequency = 600000, .index = 9192},
915 {.frequency = 816000, .index = 12560},
916 {.frequency = 1008000, .index = 14741},
917 {.frequency = 1200000, .index = 16886},
918 {.frequency = 1416000, .index = 20081},
919 {.frequency = 1608000, .index = 24061},
920 {.frequency = CPUFREQ_TABLE_END, .index = 1},
923 static struct pvtm_info rk3288v1_arm_pvtm_info = {
924 .compatible = "rockchip,rk3288",
925 .pvtm_table = rk3288v1_arm_pvtm_table,
926 .channel = ARM_DVFS_CH,
927 .process_version = RK3288_PROCESS_V1,
928 .scan_rate_hz = 216000000,
929 .sample_time_us = 1000,
930 .volt_step_uv = 12500,
931 .delta_pvtm_by_volt = 450,
932 .delta_pvtm_by_temp = 7,
933 .volt_margin_uv = 25000,
934 .min_volt_uv = 850000,
935 .max_volt_uv = 1400000,
938 static struct cpufreq_frequency_table rk3288v2_arm_pvtm_table[] = {
939 {.frequency = 216000, .index = 5369},
940 {.frequency = 408000, .index = 6984},
941 {.frequency = 600000, .index = 8771},
942 {.frequency = 816000, .index = 11434},
943 {.frequency = 1008000, .index = 14178},
944 {.frequency = 1200000, .index = 16797},
945 {.frequency = 1416000, .index = 20178},
946 {.frequency = 1608000, .index = 23303},
947 {.frequency = CPUFREQ_TABLE_END, .index = 1},
950 static struct pvtm_info rk3288v2_arm_pvtm_info = {
951 .compatible = "rockchip,rk3288",
952 .pvtm_table = rk3288v2_arm_pvtm_table,
953 .channel = ARM_DVFS_CH,
954 .process_version = RK3288_PROCESS_V2,
955 .scan_rate_hz = 216000000,
956 .sample_time_us = 1000,
957 .volt_step_uv = 12500,
958 .delta_pvtm_by_volt = 430,
959 .delta_pvtm_by_temp = 12,
960 .volt_margin_uv = 25000,
961 .min_volt_uv = 900000,
962 .max_volt_uv = 1400000,
/* Lookup array scanned by process version / compatible at probe time. */
965 static struct pvtm_info *pvtm_info_table[] = {
966 &rk3288v0_arm_pvtm_info,
967 &rk3288v1_arm_pvtm_info,
968 &rk3288v2_arm_pvtm_info
/*
 * pvtm_set_single_dvfs - choose the voltage for one dvfs-table entry.
 *
 * The required PVTM value is min_pvtm plus a temperature compensation
 * (delta_pvtm_by_temp per degree) plus a safety margin derived from
 * volt_margin_uv converted into PVTM steps (delta_pvtm_by_volt per
 * volt_step_uv, rounded up).  The first cached voltage whose measured
 * PVTM meets the target is written into dvfs_table[idx].index.
 */
971 static int pvtm_set_single_dvfs(struct dvfs_node *dvfs_node, u32 idx,
972 struct pvtm_info *info, int *pvtm_list,
975 struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
976 struct cpufreq_frequency_table *pvtm_table = dvfs_node->pvtm_table;
977 int target_pvtm, pvtm_margin, volt_margin;
978 unsigned int n_voltages = dvfs_node->vd->n_voltages;
979 int *volt_list = dvfs_node->vd->volt_list;
/* Margin in volt-steps, rounded up when not an exact multiple. */
982 volt_margin = info->volt_margin_uv + pvtm_table[idx].index;
983 n = volt_margin/info->volt_step_uv;
984 if (volt_margin%info->volt_step_uv)
987 pvtm_margin = n*info->delta_pvtm_by_volt;
/* Prefer channel 1 temperature, fall back to channel 0. */
989 temp = dvfs_get_temp(1);
991 temp = dvfs_get_temp(0);
993 target_pvtm = min_pvtm+temp * info->delta_pvtm_by_temp + pvtm_margin;
995 DVFS_DBG("=====%s: temp:%d, freq:%d, target pvtm:%d=====\n",
996 __func__, temp, dvfs_table[idx].frequency, target_pvtm);
998 for (n = 0; n < n_voltages; n++) {
999 if (pvtm_list[n] >= target_pvtm) {
1000 dvfs_table[idx].index = volt_list[n];
1001 DVFS_DBG("freq[%d]=%d, volt=%d\n",
1002 idx, dvfs_table[idx].frequency, volt_list[n]);
/*
 * pvtm_set_dvfs_table - calibrate the whole dvfs table from PVTM.
 *
 * Runs the clock at info->scan_rate_hz, sweeps every candidate voltage
 * within [min_volt_uv, max_volt_uv] sampling the PVTM counter into
 * pvtm_list, then resolves each table frequency against the reference
 * pvtm_table and delegates per-entry voltage choice to
 * pvtm_set_single_dvfs().  Frequencies above the reference table's top
 * entry are reported as unsupported.
 */
1013 static void pvtm_set_dvfs_table(struct dvfs_node *dvfs_node)
1015 struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
1016 struct pvtm_info *info = dvfs_node->pvtm_info;
1017 struct regulator *regulator = dvfs_node->vd->regulator;
1020 int pvtm_list[VD_VOL_LIST_CNT] = {0};
1021 unsigned int n_voltages = dvfs_node->vd->n_voltages;
1022 int *volt_list = dvfs_node->vd->volt_list;
1027 clk_set_rate(dvfs_node->clk, info->scan_rate_hz);
1028 DVFS_DBG("%s:%lu\n", __func__, clk_get_rate(dvfs_node->clk));
/* Sample PVTM at each usable voltage step. */
1030 for (i = 0; i < n_voltages; i++) {
1031 if ((volt_list[i] >= info->min_volt_uv) &&
1032 (volt_list[i] <= info->max_volt_uv)) {
1033 regulator_set_voltage(regulator, volt_list[i],
1035 pvtm_list[i] = pvtm_get_value(info->channel,
1036 info->sample_time_us);
1040 for (i = 0; dvfs_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1041 for (j = 0; info->pvtm_table[j].frequency !=
1042 CPUFREQ_TABLE_END; j++)
1043 if (info->pvtm_table[j].frequency >=
1044 dvfs_table[i].frequency) {
1045 int min_pvtm = info->pvtm_table[j].index;
1047 ret = pvtm_set_single_dvfs(dvfs_node,
1056 DVFS_WARNING("freq: %d can not reach target pvtm\n",
1057 dvfs_table[i].frequency);
1061 if (info->pvtm_table[j].frequency == CPUFREQ_TABLE_END) {
1062 DVFS_WARNING("not support freq :%d, max freq is %d\n",
1063 dvfs_table[i].frequency,
1064 info->pvtm_table[j-1].frequency);
/*
 * dvfs_virt_temp_limit_work_func - "virtual temperature" throttling for
 * SoCs without a usable tsadc (rk312x only).  Estimates the number of
 * busy CPUs from idle-time deltas, picks the matching limit table, finds
 * the highest frequency whose threshold (.index) is <= target_temp, and
 * re-applies last_set_rate when the limit changed.  Performance-status
 * periods are tracked via the in_perf flag.
 */
1070 static void dvfs_virt_temp_limit_work_func(struct dvfs_node *dvfs_node)
1072 const struct cpufreq_frequency_table *limits_table = NULL;
1073 unsigned int new_temp_limit_rate = -1;
1074 unsigned int nr_cpus = num_online_cpus();
1075 static bool in_perf;
1078 if (!cpu_is_rk312x())
1081 if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
1083 } else if (in_perf) {
/* Static idle-accounting state persists between invocations. */
1086 static u64 last_time_in_idle;
1087 static u64 last_time_in_idle_timestamp;
1088 u64 time_in_idle = 0, now;
1091 unsigned cpu, busy_cpus;
1093 for_each_online_cpu(cpu) {
1094 time_in_idle += get_cpu_idle_time_us(cpu, &now);
1096 delta_time = now - last_time_in_idle_timestamp;
1097 delta_idle = time_in_idle - last_time_in_idle;
1098 last_time_in_idle = time_in_idle;
1099 last_time_in_idle_timestamp = now;
1100 delta_idle += delta_time >> 4; /* +6.25% */
/* Bucket total idle into an estimated count of busy CPUs. */
1101 if (delta_idle > (nr_cpus - 1)
1102 * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
1104 else if (delta_idle > (nr_cpus - 2) * delta_time)
1106 else if (delta_idle > (nr_cpus - 3) * delta_time)
1111 limits_table = dvfs_node->virt_temp_limit_table[busy_cpus-1];
1112 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
1113 delta_time, delta_idle, nr_cpus, busy_cpus);
1117 new_temp_limit_rate = limits_table[0].frequency;
1118 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1119 if (dvfs_node->target_temp >=
1120 limits_table[i].index)
1121 new_temp_limit_rate = limits_table[i].frequency;
1125 if (dvfs_node->temp_limit_rate != new_temp_limit_rate) {
1126 dvfs_node->temp_limit_rate = new_temp_limit_rate;
1127 dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
1128 DVFS_DBG("temp_limit_rate:%d\n",
1129 (int)dvfs_node->temp_limit_rate);
/*
 * dvfs_temp_limit_performance - throttling policy for PERFORMANCE mode:
 * start from max_rate and clamp to the per_temp_limit_table entry whose
 * threshold (.index) the temperature exceeds, then re-apply the rate.
 */
1133 static void dvfs_temp_limit_performance(struct dvfs_node *dvfs_node, int temp)
1137 dvfs_node->temp_limit_rate = dvfs_node->max_rate;
1138 for (i = 0; dvfs_node->per_temp_limit_table[i].frequency !=
1139 CPUFREQ_TABLE_END; i++) {
1140 if (temp > dvfs_node->per_temp_limit_table[i].index)
1141 dvfs_node->temp_limit_rate =
1142 dvfs_node->per_temp_limit_table[i].frequency;
1144 dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
/*
 * dvfs_temp_limit_normal - incremental throttling for NORMAL mode.
 * Above target_temp (and still rising): step the limit down by a rate
 * step chosen from nor_temp_limit_table by how far over target we are,
 * clamped at min_temp_limit.  Below target: step it back up toward
 * max_rate.  Each change re-applies last_set_rate.
 */
1147 static void dvfs_temp_limit_normal(struct dvfs_node *dvfs_node, int temp)
1150 unsigned long arm_rate_step = 0;
1153 if (temp > dvfs_node->target_temp) {
1154 if (temp > dvfs_node->old_temp) {
1155 delta_temp = temp - dvfs_node->target_temp;
1157 dvfs_node->nor_temp_limit_table[i].frequency !=
1158 CPUFREQ_TABLE_END; i++) {
1160 dvfs_node->nor_temp_limit_table[i].index)
1162 dvfs_node->nor_temp_limit_table[i].frequency;
1164 if (arm_rate_step &&
1165 (dvfs_node->temp_limit_rate > arm_rate_step)) {
1166 dvfs_node->temp_limit_rate -= arm_rate_step;
1167 if (dvfs_node->temp_limit_rate <
1168 dvfs_node->min_temp_limit)
1169 dvfs_node->temp_limit_rate =
1170 dvfs_node->min_temp_limit;
1171 dvfs_clk_set_rate(dvfs_node,
1172 dvfs_node->last_set_rate);
/* Cooling path: ramp the limit back up toward max_rate. */
1176 if (dvfs_node->temp_limit_rate < dvfs_node->max_rate) {
1177 delta_temp = dvfs_node->target_temp - temp;
1179 dvfs_node->nor_temp_limit_table[i].frequency !=
1180 CPUFREQ_TABLE_END; i++) {
1182 dvfs_node->nor_temp_limit_table[i].index)
1184 dvfs_node->nor_temp_limit_table[i].frequency;
1187 if (arm_rate_step) {
1188 dvfs_node->temp_limit_rate += arm_rate_step;
1189 if (dvfs_node->temp_limit_rate >
1190 dvfs_node->max_rate)
1191 dvfs_node->temp_limit_rate =
1192 dvfs_node->max_rate;
1193 dvfs_clk_set_rate(dvfs_node,
1194 dvfs_node->last_set_rate);
/*
 * dvfs_temp_limit - policy dispatcher.  Ignores jitter of <=1 degree,
 * then routes to the performance or normal handler depending on the
 * current rockchip PM policy (each guarded by its table's presence),
 * and records old_temp for the next trend comparison.
 */
1200 static void dvfs_temp_limit(struct dvfs_node *dvfs_node, int temp)
1205 delta_temp = (dvfs_node->old_temp > temp) ? (dvfs_node->old_temp-temp) :
1206 (temp-dvfs_node->old_temp);
1207 if (delta_temp <= 1)
1210 if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
1211 if (!dvfs_node->per_temp_limit_table)
1213 dvfs_temp_limit_performance(dvfs_node, temp);
1214 } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
1215 if (!dvfs_node->nor_temp_limit_table)
1217 dvfs_temp_limit_normal(dvfs_node, temp);
1219 dvfs_node->old_temp = temp;
1220 DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n",
1221 temp, dvfs_node->temp_limit_rate);
/*
 * dvfs_temp_limit_work_func - periodic (100 ms) per-node thermal worker.
 * Re-queues itself, then throttles each known cpu/gpu node that has
 * temp_limit_enable set: cpu_b and cpu_l share channel 0, cpu uses
 * channel 1 (falling back to the virtual-temp estimator when the read
 * is invalid), gpu uses channel 2.
 * NOTE(review): same name as the function at orig. line 858 — in the
 * full file these are presumably mutually exclusive (config-guarded);
 * confirm before refactoring either.
 */
1224 static void dvfs_temp_limit_work_func(struct work_struct *work)
1226 unsigned long delay = HZ/10;
1227 int temp = INVALID_TEMP;
1229 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
1231 if (clk_cpu_b_dvfs_node &&
1232 clk_cpu_b_dvfs_node->temp_limit_enable == 1) {
1233 temp = dvfs_get_temp(0);
1234 if (temp != INVALID_TEMP)
1235 dvfs_temp_limit(clk_cpu_b_dvfs_node, temp);
1237 if (clk_cpu_l_dvfs_node &&
1238 clk_cpu_l_dvfs_node->temp_limit_enable == 1) {
/* Reuse the channel-0 reading taken for the big cluster if valid. */
1239 if (temp == INVALID_TEMP)
1240 temp = dvfs_get_temp(0);
1241 if (temp != INVALID_TEMP)
1242 dvfs_temp_limit(clk_cpu_l_dvfs_node, temp);
1244 if (clk_cpu_dvfs_node &&
1245 clk_cpu_dvfs_node->temp_limit_enable == 1) {
1246 temp = dvfs_get_temp(1);
1247 if (temp == INVALID_TEMP)
1248 dvfs_virt_temp_limit_work_func(clk_cpu_dvfs_node);
1250 dvfs_temp_limit(clk_cpu_dvfs_node, temp);
1252 if (clk_gpu_dvfs_node &&
1253 clk_gpu_dvfs_node->temp_limit_enable == 1) {
1254 temp = dvfs_get_temp(2);
1255 if (temp != INVALID_TEMP)
1256 dvfs_temp_limit(clk_gpu_dvfs_node, temp);
/* Self-rearming delayed work; first queued at driver init. */
1259 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
/*
 * dvfs_clk_enable_limit - exported API to clamp a dvfs clock's operating
 * range.  Resets min/max from the table, then narrows them with the
 * caller's bounds (each bound only applied when it lies inside the
 * table range), sets freq_limit_en, and re-runs the vd dvfs target at
 * last_set_rate (or the current clk rate when nothing was set yet) so
 * the clamp takes effect immediately.  All under the vd mutex.
 */
1261 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1263 u32 rate = 0, ret = 0;
1265 if (!clk_dvfs_node || (min_rate > max_rate))
1268 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1269 mutex_lock(&clk_dvfs_node->vd->mutex);
1271 /* To reset clk_dvfs_node->min_rate/max_rate */
1272 dvfs_get_rate_range(clk_dvfs_node);
1273 clk_dvfs_node->freq_limit_en = 1;
1275 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1276 clk_dvfs_node->min_rate = min_rate;
1279 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1280 clk_dvfs_node->max_rate = max_rate;
1283 if (clk_dvfs_node->last_set_rate == 0)
1284 rate = __clk_get_rate(clk_dvfs_node->clk);
1286 rate = clk_dvfs_node->last_set_rate;
1287 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1289 mutex_unlock(&clk_dvfs_node->vd->mutex);
1293 DVFS_DBG("%s:clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1294 __func__, __clk_get_name(clk_dvfs_node->clk),
1295 clk_dvfs_node->last_set_rate,
1296 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1300 EXPORT_SYMBOL(dvfs_clk_enable_limit);
/*
 * dvfs_clk_disable_limit - undo dvfs_clk_enable_limit: restore the full
 * table range, clear freq_limit_en, and re-run the dvfs target at
 * last_set_rate so the clock can return to its unclamped rate.
 */
1302 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1309 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1310 mutex_lock(&clk_dvfs_node->vd->mutex);
1312 /* To reset clk_dvfs_node->min_rate/max_rate */
1313 dvfs_get_rate_range(clk_dvfs_node);
1314 clk_dvfs_node->freq_limit_en = 0;
1315 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1317 mutex_unlock(&clk_dvfs_node->vd->mutex);
1320 DVFS_DBG("%s: clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1321 __func__, __clk_get_name(clk_dvfs_node->clk),
1322 clk_dvfs_node->last_set_rate,
1323 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1327 EXPORT_SYMBOL(dvfs_clk_disable_limit);
/*
 * dvfs_disable_temp_limit - turn off thermal throttling for every known
 * CPU/GPU dvfs node and cancel the pending temperature-limit work.
 */
1329 void dvfs_disable_temp_limit(void) {
1330 if (clk_cpu_b_dvfs_node)
1331 clk_cpu_b_dvfs_node->temp_limit_enable = 0;
1332 if (clk_cpu_l_dvfs_node)
1333 clk_cpu_l_dvfs_node->temp_limit_enable = 0;
1334 if (clk_cpu_dvfs_node)
1335 clk_cpu_dvfs_node->temp_limit_enable = 0;
1336 if (clk_gpu_dvfs_node)
1337 clk_gpu_dvfs_node->temp_limit_enable = 0;
/* Stop the periodic thermal-limit worker; waits for a running instance. */
1338 cancel_delayed_work_sync(&dvfs_temp_limit_work);
/*
 * dvfs_clk_get_limit - read back the current [min_rate, max_rate] window.
 * @min_rate/@max_rate: out parameters filled under the vd mutex.
 *
 * Returns the freq_limit_en flag (nonzero when limiting is active).
 */
1341 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
1348 mutex_lock(&clk_dvfs_node->vd->mutex);
1350 *min_rate = clk_dvfs_node->min_rate;
1351 *max_rate = clk_dvfs_node->max_rate;
1352 freq_limit_en = clk_dvfs_node->freq_limit_en;
1354 mutex_unlock(&clk_dvfs_node->vd->mutex);
1356 return freq_limit_en;
1358 EXPORT_SYMBOL(dvfs_clk_get_limit);
/*
 * dvfs_clk_register_set_rate_callback - install a custom set-rate hook.
 *
 * The callback, when present, is used by dvfs_target() instead of
 * clk_set_rate().  Stored under the vd mutex.
 */
1360 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1365 mutex_lock(&clk_dvfs_node->vd->mutex);
1366 clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1367 mutex_unlock(&clk_dvfs_node->vd->mutex);
1371 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
/*
 * dvfs_get_freq_volt_table - return the node's frequency/voltage table.
 *
 * The pointer is read under the vd mutex, but the caller receives a live
 * pointer to the table owned by the dvfs node (no copy is made).
 */
1373 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node)
1375 struct cpufreq_frequency_table *table;
1380 mutex_lock(&clk_dvfs_node->vd->mutex);
1381 table = clk_dvfs_node->dvfs_table;
1382 mutex_unlock(&clk_dvfs_node->vd->mutex);
1386 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
/*
 * dvfs_set_freq_volt_table - replace the node's frequency/voltage table.
 *
 * Validates the pointer, then (under the vd mutex) stores the table,
 * refreshes the node's min/max rate range, and rounds table entries to
 * achievable clock rates and regulator voltage steps.
 */
1388 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1393 if (IS_ERR_OR_NULL(table)){
1394 DVFS_ERR("%s:invalid table!\n", __func__);
1398 mutex_lock(&clk_dvfs_node->vd->mutex);
1399 clk_dvfs_node->dvfs_table = table;
1400 dvfs_get_rate_range(clk_dvfs_node);
1401 dvfs_table_round_clk_rate(clk_dvfs_node);
1402 dvfs_table_round_volt(clk_dvfs_node);
1403 mutex_unlock(&clk_dvfs_node->vd->mutex);
1407 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
/*
 * get_adjust_volt_by_leakage - compute a voltage delta from chip leakage.
 *
 * Compares the measured leakage for this node's channel against the
 * leakage the default table was characterized for, then walks the
 * lkg_info table to pick a positive delta (low-leakage part: raise volt)
 * or a negative delta (high-leakage part: lower volt).
 *
 * NOTE(review): several interior lines (returns, loop breaks, final
 * return of adjust_volt) are missing from this extract.
 */
1409 static int get_adjust_volt_by_leakage(struct dvfs_node *dvfs_node)
1412 int delta_leakage = 0;
1414 int adjust_volt = 0;
/* -1 means the table was not characterized: nothing to adjust against. */
1419 if (dvfs_node->lkg_info.def_table_lkg == -1)
1422 leakage = rockchip_get_leakage(dvfs_node->channel);
/* 0 and 0xff are sentinel readings from the leakage fuse; treat as invalid. */
1423 if (!leakage || (leakage == 0xff))
1426 delta_leakage = leakage - dvfs_node->lkg_info.def_table_lkg;
1427 if (delta_leakage <= 0) {
/* Leakage at or below reference: search for the matching positive delta. */
1428 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1429 CPUFREQ_TABLE_END); i++) {
1430 if (leakage > dvfs_node->lkg_info.table[i].lkg) {
1432 dvfs_node->lkg_info.table[i].dlt_volt;
1437 } else if (delta_leakage > 0) {
/* Leakage above reference: apply the table delta with inverted sign. */
1438 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1439 CPUFREQ_TABLE_END); i++) {
1440 if (leakage <= dvfs_node->lkg_info.table[i].lkg) {
1442 -dvfs_node->lkg_info.table[i].dlt_volt;
/*
 * adjust_table_by_leakage - shift table voltages by the leakage delta.
 *
 * Applies get_adjust_volt_by_leakage() to every table entry at or above
 * lkg_info.min_adjust_freq.  Bails out when there is no table or the
 * minimum adjust frequency is unset (-1).
 */
1450 static void adjust_table_by_leakage(struct dvfs_node *dvfs_node)
1452 int i, adjust_volt = get_adjust_volt_by_leakage(dvfs_node);
1457 if (!dvfs_node->dvfs_table)
1460 if (dvfs_node->lkg_info.min_adjust_freq == -1)
1464 (dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1465 if (dvfs_node->dvfs_table[i].frequency >=
1466 dvfs_node->lkg_info.min_adjust_freq)
/* .index holds the voltage (uV) in this table layout. */
1467 dvfs_node->dvfs_table[i].index += adjust_volt;
/*
 * clk_enable_dvfs - enable dvfs management for a clock node.
 *
 * On the first enable (enable_count == 0):
 *   - lazily acquires the voltage-domain regulator and probes its
 *     voltage list / ramp timing;
 *   - rounds the dvfs table, computes the rate range, applies leakage
 *     and PVTM adjustments when configured;
 *   - snapshots the current clock rate, looks up its reference voltage
 *     and programs the regulator if the domain voltage must change;
 *   - registers the clk notifier when one is attached.
 * Subsequent calls only bump enable_count.
 * Finally (re)validates the regulator-mode table when regu_mode_en is set.
 *
 * NOTE(review): error-return lines and some braces are missing from this
 * extract; comments describe the visible statements only.
 */
1471 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1473 struct cpufreq_frequency_table clk_fv;
1481 DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n",
1482 __func__, __clk_get_name(clk_dvfs_node->clk));
1484 if (!clk_dvfs_node->vd) {
1485 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n",
1486 __func__, clk_dvfs_node->name);
1489 mutex_lock(&clk_dvfs_node->vd->mutex);
1490 if (clk_dvfs_node->enable_count == 0) {
/* First enable: make sure the domain regulator is available. */
1491 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1492 if (clk_dvfs_node->vd->regulator_name)
1493 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1494 if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1495 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1496 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
/* Probe regulator state, supported voltage steps and ramp times. */
1497 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1498 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1499 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1501 clk_dvfs_node->enable_count = 0;
1502 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n",
1503 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1504 mutex_unlock(&clk_dvfs_node->vd->mutex);
1508 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1511 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1512 __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
/* Normalize the table and derive the usable rate range. */
1514 dvfs_table_round_clk_rate(clk_dvfs_node);
1515 dvfs_get_rate_range(clk_dvfs_node);
1516 clk_dvfs_node->freq_limit_en = 1;
1517 if (clk_dvfs_node->lkg_adjust_volt_en)
1518 adjust_table_by_leakage(clk_dvfs_node);
1519 if (clk_dvfs_node->support_pvtm)
1520 pvtm_set_dvfs_table(clk_dvfs_node);
1521 dvfs_table_round_volt(clk_dvfs_node);
/* Record the clock's current rate as the dvfs starting point. */
1522 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1523 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1525 DVFS_DBG("%s: %s get freq %u!\n",
1526 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
/* Find the reference voltage for the current rate. */
1528 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1529 if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1530 DVFS_ERR("%s: table empty\n", __func__);
1531 clk_dvfs_node->enable_count = 0;
1532 mutex_unlock(&clk_dvfs_node->vd->mutex);
/* Current rate is above every table entry: keep defaults, still enable. */
1535 DVFS_WARNING("%s: clk(%s) freq table all value are smaller than default(%d), use default, just enable dvfs\n",
1536 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1537 clk_dvfs_node->enable_count++;
1538 mutex_unlock(&clk_dvfs_node->vd->mutex);
1542 clk_dvfs_node->enable_count++;
1543 clk_dvfs_node->set_volt = clk_fv.index;
/* Voltage the whole domain needs, considering all clocks on it. */
1544 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1545 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1546 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1548 if (clk_dvfs_node->dvfs_nb) {
1549 // must unregister when clk disable
1550 clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
/* Program the regulator if the domain voltage must move. */
1553 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1554 ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1555 dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1557 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1558 clk_dvfs_node->enable_count = 0;
1559 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
1560 mutex_unlock(&clk_dvfs_node->vd->mutex);
1563 clk_dvfs_node->vd->cur_volt = volt_new;
1564 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1568 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1569 __func__, clk_dvfs_node->enable_count);
1570 clk_dvfs_node->enable_count++;
/* Optional regulator-mode handling: validate table and sync current mode. */
1573 if (clk_dvfs_node->regu_mode_en) {
1574 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1576 DVFS_ERR("%s: clk(%s) regu_mode_table is unvalid, set regu_mode_en=0!\n",
1577 __func__, clk_dvfs_node->name);
1578 clk_dvfs_node->regu_mode_en = 0;
1579 mutex_unlock(&clk_dvfs_node->vd->mutex);
1583 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1585 DVFS_ERR("%s: clk(%s) rate %dKhz get regu_mode fail\n",
1586 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1587 mutex_unlock(&clk_dvfs_node->vd->mutex);
1590 clk_dvfs_node->regu_mode = mode;
1592 dvfs_update_clk_pds_mode(clk_dvfs_node);
1595 mutex_unlock(&clk_dvfs_node->vd->mutex);
1599 EXPORT_SYMBOL(clk_enable_dvfs);
/*
 * clk_disable_dvfs - drop one dvfs enable reference for a clock node.
 *
 * Warns (and returns early) if the node is already disabled.  When the
 * count reaches zero, recomputes the domain voltage without this clock's
 * request, scales the regulator directly, and unregisters the clk
 * notifier registered in clk_enable_dvfs().
 */
1601 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1608 DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n",
1609 __func__, __clk_get_name(clk_dvfs_node->clk));
1611 mutex_lock(&clk_dvfs_node->vd->mutex);
1612 if (!clk_dvfs_node->enable_count) {
1613 DVFS_WARNING("%s:clk(%s) is already closed!\n",
1614 __func__, __clk_get_name(clk_dvfs_node->clk));
1615 mutex_unlock(&clk_dvfs_node->vd->mutex);
1618 clk_dvfs_node->enable_count--;
1619 if (0 == clk_dvfs_node->enable_count) {
1620 DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1621 __func__, __clk_get_name(clk_dvfs_node->clk));
/* Re-derive the domain voltage now that this clock no longer votes. */
1622 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1623 dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1626 clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1627 DVFS_DBG("clk unregister nb!\n");
1631 mutex_unlock(&clk_dvfs_node->vd->mutex);
1634 EXPORT_SYMBOL(clk_disable_dvfs);
/*
 * dvfs_get_limit_rate - clamp a requested rate to the node's limits.
 *
 * Applies the [min_rate, max_rate] window when freq_limit_en is set, then
 * caps the result at temp_limit_rate when thermal limiting is enabled.
 * NOTE(review): the lines that initialize limit_rate for the in-range
 * case (and the final return) are missing from this extract — presumably
 * limit_rate defaults to the requested rate; confirm against the full file.
 */
1636 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1638 unsigned long limit_rate;
1641 if (clk_dvfs_node->freq_limit_en) {
1643 if (rate < clk_dvfs_node->min_rate) {
1644 limit_rate = clk_dvfs_node->min_rate;
1645 } else if (rate > clk_dvfs_node->max_rate) {
1646 limit_rate = clk_dvfs_node->max_rate;
/* Thermal throttle: never exceed the temperature-limited rate. */
1648 if (clk_dvfs_node->temp_limit_enable) {
1649 if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1650 limit_rate = clk_dvfs_node->temp_limit_rate;
1655 DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
/*
 * dvfs_target - core voltage/frequency transition for a dvfs clock.
 *
 * Sequence (classic DVFS ordering):
 *   1. recover from any previously failed voltage set;
 *   2. clamp the requested rate and round it to an achievable clock rate;
 *   3. look up the reference voltage for the new rate;
 *   4. raising frequency: set regulator mode and raise voltage FIRST,
 *      then change the clock rate;
 *   5. lowering frequency: change the clock rate first, then drop the
 *      voltage and regulator mode.
 * On a failed rate set, rolls the stored voltage request back.
 * Runs as vd->vd_dvfs_target under the vd mutex (see callers).
 *
 * NOTE(review): several return/brace lines are missing from this extract.
 */
1660 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1662 struct cpufreq_frequency_table clk_fv;
1663 unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1664 struct clk *clk = clk_dvfs_node->clk;
1670 if (!clk_dvfs_node->enable_count)
/* A previous regulator write failed: restore a known-good voltage first. */
1673 if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1674 /* It means the last time set voltage error */
1675 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1681 rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1682 new_rate = __clk_round_rate(clk, rate);
1683 old_rate = __clk_get_rate(clk);
1684 if (new_rate == old_rate)
1687 DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1688 __func__, clk_dvfs_node->name, rate, old_rate);
1690 /* find the clk corresponding voltage */
1691 ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1693 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
1694 __func__, clk_dvfs_node->name, new_rate);
/* Remember the old voltage request so a failed transition can roll back. */
1697 clk_volt_store = clk_dvfs_node->set_volt;
1698 clk_dvfs_node->set_volt = clk_fv.index;
1699 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1700 DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1701 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1704 /* if up the rate */
1705 if (new_rate > old_rate) {
1706 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1708 DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1709 __func__, clk_dvfs_node->name, new_rate);
/* Raise the voltage before raising the clock. */
1711 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1713 goto fail_roll_back;
/* Use the registered custom set-rate hook when present. */
1717 if (clk_dvfs_node->clk_dvfs_target) {
1718 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1720 ret = clk_set_rate(clk, rate);
1724 DVFS_ERR("%s:clk(%s) set rate err\n",
1725 __func__, __clk_get_name(clk));
1726 goto fail_roll_back;
1728 clk_dvfs_node->set_freq = new_rate / 1000;
1730 DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
1731 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1733 /* if down the rate */
1734 if (new_rate < old_rate) {
/* Lower the voltage only after the clock has slowed down. */
1735 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1739 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1741 DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1742 __func__, clk_dvfs_node->name, new_rate);
/* fail_roll_back: restore the pre-transition voltage request. */
1747 clk_dvfs_node->set_volt = clk_volt_store;
/* dvfs_clk_round_rate - thin wrapper: round @rate to what the underlying
 * clock can actually produce. */
1752 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1754 return __clk_round_rate(clk_dvfs_node->clk, rate);
1756 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
/* dvfs_clk_get_rate - thin wrapper: current rate of the node's clock (Hz). */
1758 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1760 return __clk_get_rate(clk_dvfs_node->clk);
1762 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
/*
 * dvfs_clk_get_last_set_rate - last rate requested via dvfs_clk_set_rate(),
 * read under the vd mutex.
 */
1764 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1766 unsigned long last_set_rate;
1768 mutex_lock(&clk_dvfs_node->vd->mutex);
1769 last_set_rate = clk_dvfs_node->last_set_rate;
1770 mutex_unlock(&clk_dvfs_node->vd->mutex);
1772 return last_set_rate;
1774 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
/* dvfs_clk_enable - thin wrapper around clk_enable() for the node's clock. */
1777 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1779 return clk_enable(clk_dvfs_node->clk);
1781 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1783 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1785 return clk_disable(clk_dvfs_node->clk);
1787 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
/*
 * clk_get_dvfs_node - look up a dvfs node by clock name.
 *
 * Walks the vd -> pd -> clk tree under rk_dvfs_mutex (plus each vd's own
 * mutex) and returns the first node whose name matches, or falls through
 * when none is found.  The returned pointer is a live reference into the
 * tree; clk_put_dvfs_node() is its (no-op) counterpart.
 */
1789 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1793 struct dvfs_node *clk_dvfs_node;
1795 mutex_lock(&rk_dvfs_mutex);
1796 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1797 mutex_lock(&vd->mutex);
1798 list_for_each_entry(pd, &vd->pd_list, node) {
1799 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1800 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
/* Found: release locks in reverse order and hand back the node. */
1801 mutex_unlock(&vd->mutex);
1802 mutex_unlock(&rk_dvfs_mutex);
1803 return clk_dvfs_node;
1807 mutex_unlock(&vd->mutex);
1809 mutex_unlock(&rk_dvfs_mutex);
1813 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
/*
 * clk_put_dvfs_node - release a node returned by clk_get_dvfs_node().
 * NOTE(review): the body is not visible in this extract; it appears to be
 * an intentionally empty counterpart (no refcounting) — confirm in full file.
 */
1815 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1819 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
/* dvfs_clk_prepare_enable - thin wrapper around clk_prepare_enable(). */
1821 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1823 return clk_prepare_enable(clk_dvfs_node->clk);
1825 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
/* dvfs_clk_disable_unprepare - thin wrapper around clk_disable_unprepare(). */
1828 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1830 clk_disable_unprepare(clk_dvfs_node->clk);
1832 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
/*
 * dvfs_clk_set_rate - public entry point to change a dvfs-managed rate.
 *
 * Delegates to the voltage domain's vd_dvfs_target (dvfs_target()) under
 * the vd mutex and records the request in last_set_rate; errors out when
 * the node has no domain or target callback.
 */
1834 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1841 DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n",
1842 __func__, clk_dvfs_node->name, rate);
1844 #if 0 // judge by reference func in rk
1845 if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1846 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1851 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1852 mutex_lock(&clk_dvfs_node->vd->mutex);
1853 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
/* Remember the request even if limits clamped the applied rate. */
1854 clk_dvfs_node->last_set_rate = rate;
1855 mutex_unlock(&clk_dvfs_node->vd->mutex);
1857 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
1858 __func__, clk_dvfs_node->name);
1863 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
/*
 * rk_regist_vd - add a voltage domain to the global dvfs tree.
 * Initializes its pd list and links it under rk_dvfs_mutex.
 */
1866 int rk_regist_vd(struct vd_node *vd)
1872 vd->volt_time_flag=0;
1874 INIT_LIST_HEAD(&vd->pd_list);
1875 mutex_lock(&rk_dvfs_mutex);
1876 list_add(&vd->node, &rk_dvfs_tree);
1877 mutex_unlock(&rk_dvfs_mutex);
1881 EXPORT_SYMBOL_GPL(rk_regist_vd);
/*
 * rk_regist_pd - attach a power domain to its parent voltage domain.
 * Initializes the pd's clock list and links it under the vd mutex.
 */
1883 int rk_regist_pd(struct pd_node *pd)
1894 INIT_LIST_HEAD(&pd->clk_list);
1895 mutex_lock(&vd->mutex);
1896 list_add(&pd->node, &vd->pd_list);
1897 mutex_unlock(&vd->mutex);
1901 EXPORT_SYMBOL_GPL(rk_regist_pd);
/*
 * rk_regist_clk - attach a dvfs clock node to its power domain's list,
 * serialized by the owning voltage domain's mutex.
 */
1903 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1911 vd = clk_dvfs_node->vd;
1912 pd = clk_dvfs_node->pd;
1916 mutex_lock(&vd->mutex);
1917 list_add(&clk_dvfs_node->node, &pd->clk_list);
1918 mutex_unlock(&vd->mutex);
1922 EXPORT_SYMBOL_GPL(rk_regist_clk);
/*
 * of_get_temp_limit_table - parse a DT property of (temp, freq-kHz) pairs
 * into a heap-allocated cpufreq table terminated by CPUFREQ_TABLE_END.
 * @propname values are e.g. "normal-temp-limit".
 *
 * In each entry, .index holds the temperature and .frequency the limit
 * rate converted from kHz to Hz (* 1000).  Caller owns the allocation.
 */
1924 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1926 struct cpufreq_frequency_table *temp_limt_table = NULL;
1927 const struct property *prop;
1931 prop = of_find_property(dev_node, propname, NULL);
1937 nr = prop->length / sizeof(u32);
/* Pairs of u32s are expected; odd counts are rejected (branch not shown). */
1939 pr_err("%s: Invalid freq list\n", __func__);
1943 temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1944 (nr/2 + 1), GFP_KERNEL);
1948 for (i=0; i<nr/2; i++){
1949 temp_limt_table[i].index = be32_to_cpup(val++);
1950 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
/* Sentinel terminator expected by table walkers. */
1953 temp_limt_table[i].index = 0;
1954 temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1956 return temp_limt_table;
/*
 * of_get_dvfs_table - parse "operating-points" (freq, volt) pairs from DT
 * into a heap-allocated cpufreq table; .frequency is the rate and .index
 * the voltage, terminated by CPUFREQ_TABLE_END.  Result stored through
 * @dvfs_table; caller owns the allocation.
 */
1960 static int of_get_dvfs_table(struct device_node *dev_node,
1961 struct cpufreq_frequency_table **dvfs_table)
1963 struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1964 const struct property *prop;
1968 prop = of_find_property(dev_node, "operating-points", NULL);
1974 nr = prop->length / sizeof(u32);
1976 pr_err("%s: Invalid freq list\n", __func__);
1980 tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1981 (nr/2 + 1), GFP_KERNEL);
1984 for (i = 0; i < nr/2; i++) {
1985 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
1986 tmp_dvfs_table[i].index = be32_to_cpup(val++);
/* Sentinel terminator. */
1989 tmp_dvfs_table[i].index = 0;
1990 tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1992 *dvfs_table = tmp_dvfs_table;
/*
 * of_get_dvfs_pvtm_table - parse "pvtm-operating-points" triples
 * (freq, volt, pvtm) from DT into two parallel tables: a dvfs table
 * (freq/volt) and a pvtm table (freq/pvtm value), both terminated by
 * CPUFREQ_TABLE_END.  Caller owns both allocations.
 */
1998 static int of_get_dvfs_pvtm_table(struct device_node *dev_node,
1999 struct cpufreq_frequency_table **dvfs_table,
2000 struct cpufreq_frequency_table **pvtm_table)
2002 struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
2003 struct cpufreq_frequency_table *tmp_pvtm_table = NULL;
2004 const struct property *prop;
2008 prop = of_find_property(dev_node, "pvtm-operating-points", NULL);
2014 nr = prop->length / sizeof(u32);
2016 pr_err("%s: Invalid freq list\n", __func__);
2020 tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
2021 (nr/3 + 1), GFP_KERNEL);
2023 tmp_pvtm_table = kzalloc(sizeof(*tmp_pvtm_table) *
2024 (nr/3 + 1), GFP_KERNEL);
2028 for (i = 0; i < nr/3; i++) {
2029 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
2030 tmp_dvfs_table[i].index = be32_to_cpup(val++);
/* Same frequency axis for both tables; third cell is the pvtm value. */
2032 tmp_pvtm_table[i].frequency = tmp_dvfs_table[i].frequency;
2033 tmp_pvtm_table[i].index = be32_to_cpup(val++);
2036 tmp_dvfs_table[i].index = 0;
2037 tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
2039 tmp_pvtm_table[i].index = 0;
2040 tmp_pvtm_table[i].frequency = CPUFREQ_TABLE_END;
2042 *dvfs_table = tmp_dvfs_table;
2043 *pvtm_table = tmp_pvtm_table;
/*
 * of_get_lkg_adjust_volt_table - parse a DT property of (leakage,
 * delta-volt) pairs into a heap-allocated lkg_adjust_volt_table,
 * terminated by a dlt_volt == CPUFREQ_TABLE_END sentinel.
 * Used by get_adjust_volt_by_leakage().  Caller owns the allocation.
 */
2048 static struct lkg_adjust_volt_table
2049 *of_get_lkg_adjust_volt_table(struct device_node *np,
2050 const char *propname)
2052 struct lkg_adjust_volt_table *lkg_adjust_volt_table = NULL;
2053 const struct property *prop;
2057 prop = of_find_property(np, propname, NULL);
2063 nr = prop->length / sizeof(s32);
2065 pr_err("%s: Invalid freq list\n", __func__);
2069 lkg_adjust_volt_table =
2070 kzalloc(sizeof(struct lkg_adjust_volt_table) *
2071 (nr/2 + 1), GFP_KERNEL);
2075 for (i = 0; i < nr/2; i++) {
2076 lkg_adjust_volt_table[i].lkg = be32_to_cpup(val++);
2077 lkg_adjust_volt_table[i].dlt_volt = be32_to_cpup(val++);
/* Sentinel terminator matched by the leakage-adjust loops. */
2080 lkg_adjust_volt_table[i].lkg = 0;
2081 lkg_adjust_volt_table[i].dlt_volt = CPUFREQ_TABLE_END;
2083 return lkg_adjust_volt_table;
/*
 * dvfs_node_parse_dt - populate a dvfs_node from its device-tree node.
 *
 * Reads, in order: the tsadc/leakage channel, regulator-mode settings,
 * thermal-limit configuration (target temperature plus the normal /
 * performance / per-load virtual limit tables), PVTM support (with its
 * combined freq/volt/pvtm table and a matching pvtm_info entry for this
 * chip's process version), the plain dvfs table otherwise, and finally
 * the leakage-based voltage-adjustment parameters.
 */
2086 static int dvfs_node_parse_dt(struct device_node *np,
2087 struct dvfs_node *dvfs_node)
2089 int process_version = rockchip_process_version();
2093 of_property_read_u32_index(np, "channel", 0, &dvfs_node->channel);
2095 pr_info("channel:%d, lkg:%d\n",
2096 dvfs_node->channel, rockchip_get_leakage(dvfs_node->channel));
2098 of_property_read_u32_index(np, "regu-mode-en", 0,
2099 &dvfs_node->regu_mode_en);
2100 if (dvfs_node->regu_mode_en)
2101 dvfs_node->regu_mode_table = of_get_regu_mode_table(np);
2103 dvfs_node->regu_mode_table = NULL;
2105 of_property_read_u32_index(np, "temp-limit-enable", 0,
2106 &dvfs_node->temp_limit_enable);
2107 if (dvfs_node->temp_limit_enable) {
2108 of_property_read_u32_index(np, "min_temp_limit",
2109 0, &dvfs_node->min_temp_limit);
2110 of_property_read_u32_index(np, "target-temp",
2111 0, &dvfs_node->target_temp);
2112 pr_info("target-temp:%d\n", dvfs_node->target_temp);
/* Limit tables for the normal and performance thermal policies. */
2113 dvfs_node->nor_temp_limit_table =
2114 of_get_temp_limit_table(np,
2115 "normal-temp-limit");
2116 dvfs_node->per_temp_limit_table =
2117 of_get_temp_limit_table(np,
2118 "performance-temp-limit");
/* Per-busy-cpu-count virtual limit tables (1..4 busy CPUs). */
2119 dvfs_node->virt_temp_limit_table[0] =
2120 of_get_temp_limit_table(np,
2121 "virt-temp-limit-1-cpu-busy");
2122 dvfs_node->virt_temp_limit_table[1] =
2123 of_get_temp_limit_table(np,
2124 "virt-temp-limit-2-cpu-busy");
2125 dvfs_node->virt_temp_limit_table[2] =
2126 of_get_temp_limit_table(np,
2127 "virt-temp-limit-3-cpu-busy");
2128 dvfs_node->virt_temp_limit_table[3] =
2129 of_get_temp_limit_table(np,
2130 "virt-temp-limit-4-cpu-busy");
/* -1 (unsigned wrap) marks "no thermal limit applied yet". */
2132 dvfs_node->temp_limit_rate = -1;
2134 ret = of_property_read_u32_index(np, "support-pvtm", 0,
2135 &dvfs_node->support_pvtm);
2137 if (of_get_dvfs_pvtm_table(np, &dvfs_node->dvfs_table,
2138 &dvfs_node->pvtm_table))
/* Match pvtm_info by channel, silicon process version, and machine. */
2141 for (i = 0; i < ARRAY_SIZE(pvtm_info_table); i++) {
2142 struct pvtm_info *pvtm_info = pvtm_info_table[i];
2144 if ((pvtm_info->channel == dvfs_node->channel) &&
2145 (pvtm_info->process_version == process_version) &&
2146 of_machine_is_compatible(pvtm_info->compatible)) {
2147 dvfs_node->pvtm_info = pvtm_info;
/* No matching pvtm_info: fall back to non-PVTM operation. */
2152 if (!dvfs_node->pvtm_info)
2153 dvfs_node->support_pvtm = 0;
2155 if (of_get_dvfs_table(np, &dvfs_node->dvfs_table))
2159 of_property_read_u32_index(np, "lkg_adjust_volt_en", 0,
2160 &dvfs_node->lkg_adjust_volt_en);
2161 if (dvfs_node->lkg_adjust_volt_en) {
2162 dvfs_node->lkg_info.def_table_lkg = -1;
2163 of_property_read_u32_index(np, "def_table_lkg", 0,
2164 &dvfs_node->lkg_info.def_table_lkg);
2166 dvfs_node->lkg_info.min_adjust_freq = -1;
2167 of_property_read_u32_index(np, "min_adjust_freq", 0,
2168 &dvfs_node->lkg_info.min_adjust_freq
2171 dvfs_node->lkg_info.table =
2172 of_get_lkg_adjust_volt_table(np,
2173 "lkg_adjust_volt_table");
/*
 * of_dvfs_init - build the dvfs tree from the device tree.
 *
 * Walks the "dvfs" node: each child is a voltage domain (vd), each
 * grandchild a power domain (pd), each great-grandchild a clock.  For
 * every level it allocates and registers the corresponding structure
 * (rk_regist_vd/pd/clk); clock nodes also get their per-node DT
 * properties parsed and their struct clk resolved by name.
 * Registered via arch_initcall_sync below.
 */
2179 int of_dvfs_init(void)
2183 struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
2184 struct dvfs_node *dvfs_node;
2188 DVFS_DBG("%s\n", __func__);
2189 pr_info("process version: %d\n", rockchip_process_version());
2191 dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
2192 if (IS_ERR_OR_NULL(dvfs_dev_node)) {
2193 DVFS_ERR("%s get dvfs dev node err\n", __func__);
2194 return PTR_ERR(dvfs_dev_node);
2197 for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
2198 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
2202 mutex_init(&vd->mutex);
2203 vd->name = vd_dev_node->name;
/* A vd without a regulator name is unusable; skip it with an error. */
2204 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
2206 DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n",
2207 __func__, vd_dev_node->name, ret);
2212 vd->suspend_volt = 0;
/* Start as FAILURE so the first dvfs_target() resyncs the voltage. */
2214 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
2215 vd->vd_dvfs_target = dvfs_target;
2216 ret = rk_regist_vd(vd);
2218 DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
2223 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n",
2224 __func__, vd->name, vd->regulator_name, vd->suspend_volt);
2226 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {
2227 pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
2232 pd->name = pd_dev_node->name;
2234 ret = rk_regist_pd(pd);
2236 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
2240 DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
2241 __func__, pd->name, vd->name);
2242 for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
2243 if (!of_device_is_available(clk_dev_node))
2246 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
2250 dvfs_node->name = clk_dev_node->name;
2254 if (dvfs_node_parse_dt(clk_dev_node, dvfs_node))
/* The DT node name doubles as the clk lookup id. */
2257 clk = clk_get(NULL, clk_dev_node->name);
2259 DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
2265 dvfs_node->clk = clk;
2266 ret = rk_regist_clk(dvfs_node);
2268 DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
2272 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n",
2273 __func__, clk_dev_node->name, pd->name);
2282 arch_initcall_sync(of_dvfs_init);
2285 /*********************************************************************************/
2287 * dump_dbg_map() : Draw all informations of dvfs while debug
/*
 * Prints the whole vd -> pd -> clk tree (voltages, limits, tables,
 * regulator modes) to the kernel log; backs the sysfs "dvfs_tree" file.
 * Note: output goes to printk, not into @buf — the returned length for
 * sysfs comes from code not visible in this extract.
 */
2289 static int dump_dbg_map(char *buf)
2294 struct dvfs_node *clk_dvfs_node;
2297 mutex_lock(&rk_dvfs_mutex);
2298 printk( "-------------DVFS TREE-----------\n\n\n");
2299 printk( "DVFS TREE:\n");
2301 list_for_each_entry(vd, &rk_dvfs_tree, node) {
2302 mutex_lock(&vd->mutex);
2303 printk( "|\n|- voltage domain:%s\n", vd->name);
2304 printk( "|- current voltage:%d\n", vd->cur_volt);
2305 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
2307 list_for_each_entry(pd, &vd->pd_list, node) {
2308 printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
2309 pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
2310 dvfs_regu_mode_to_string(pd->regu_mode));
2312 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
2313 printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
2314 " enable_dvfs = %s\n",
2315 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
2316 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
2317 printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
2318 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
2319 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
2320 clk_dvfs_node->last_set_rate/1000,
/* Dump each (freq, volt) pair of the node's dvfs table. */
2321 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2322 printk( "| | | |- freq = %d, volt = %d\n",
2323 clk_dvfs_node->dvfs_table[i].frequency,
2324 clk_dvfs_node->dvfs_table[i].index);
2327 printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
2328 " regu_mode_en = %d\n",
2329 clk_dvfs_node->name, clk_dvfs_node->set_freq,
2330 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
2331 clk_dvfs_node->regu_mode_en);
2332 if (clk_dvfs_node->regu_mode_table) {
2333 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2334 printk( "| | | |- freq = %d, regu_mode = %s\n",
2335 clk_dvfs_node->regu_mode_table[i].frequency/1000,
2336 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
2341 mutex_unlock(&vd->mutex);
2344 printk( "-------------DVFS TREE END------------\n");
2345 mutex_unlock(&rk_dvfs_mutex);
2350 /*********************************************************************************/
/* Sysfs plumbing: kobject for /sys/dvfs and the attribute wrapper type. */
2351 static struct kobject *dvfs_kobj;
2352 struct dvfs_attribute {
2353 struct attribute attr;
2354 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
2356 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
2357 const char *buf, size_t n);
/* dvfs_tree sysfs store: body not visible in this extract — appears to
 * accept and ignore writes; confirm in the full file. */
2360 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
2361 const char *buf, size_t n)
/* dvfs_tree sysfs show: dump the dvfs tree via dump_dbg_map(). */
2365 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
2368 return dump_dbg_map(buf);
/* Attribute table installed under /sys/dvfs by dvfs_init(). */
2372 static struct dvfs_attribute dvfs_attrs[] = {
2373 /* node_name permision show_func store_func */
2374 //#ifdef CONFIG_RK_CLOCK_PROC
2375 __ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR, dvfs_tree_show, dvfs_tree_store),
/*
 * dvfs_init - late_initcall setup after the dvfs tree exists.
 *
 * Creates /sys/dvfs and its attributes, caches the well-known CPU/GPU
 * dvfs nodes (seeding each temp_limit_rate with its max_rate and picking
 * the first of big/little as the generic "bl" node), starts the thermal
 * limit workqueue when any node has thermal limiting enabled, and wires
 * up the vdd_gpu power-down/suspend/reboot notifiers.
 */
2379 static int __init dvfs_init(void)
2383 dvfs_kobj = kobject_create_and_add("dvfs", NULL);
2386 for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
2387 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
2389 DVFS_ERR("create index %d error\n", i);
2394 clk_cpu_b_dvfs_node = clk_get_dvfs_node("clk_core_b");
2395 if (clk_cpu_b_dvfs_node) {
2396 clk_cpu_b_dvfs_node->temp_limit_rate =
2397 clk_cpu_b_dvfs_node->max_rate;
/* First cluster found becomes the node used for temperature readback. */
2398 if (clk_cpu_bl_dvfs_node == NULL)
2399 clk_cpu_bl_dvfs_node = clk_cpu_b_dvfs_node;
2402 clk_cpu_l_dvfs_node = clk_get_dvfs_node("clk_core_l");
2403 if (clk_cpu_l_dvfs_node) {
2404 clk_cpu_l_dvfs_node->temp_limit_rate =
2405 clk_cpu_l_dvfs_node->max_rate;
2406 if (clk_cpu_bl_dvfs_node == NULL)
2407 clk_cpu_bl_dvfs_node = clk_cpu_l_dvfs_node;
2410 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
2411 if (clk_cpu_dvfs_node)
2412 clk_cpu_dvfs_node->temp_limit_rate =
2413 clk_cpu_dvfs_node->max_rate;
2415 clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
2416 if (clk_gpu_dvfs_node)
2417 clk_gpu_dvfs_node->temp_limit_rate =
2418 clk_gpu_dvfs_node->max_rate;
/* Only spin up the thermal workqueue when some node wants limiting. */
2420 if ((clk_cpu_b_dvfs_node && clk_cpu_b_dvfs_node->temp_limit_enable) ||
2421 (clk_cpu_l_dvfs_node && clk_cpu_l_dvfs_node->temp_limit_enable) ||
2422 (clk_gpu_dvfs_node && clk_gpu_dvfs_node->temp_limit_enable) ||
2423 (clk_cpu_dvfs_node && clk_cpu_dvfs_node->temp_limit_enable)) {
2424 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT |
2425 WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
2426 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
/* GPU power-domain notifier chain plus suspend/reboot hooks for vdd_gpu. */
2429 vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
2430 if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
2431 struct clk *clk = clk_get(NULL, "pd_gpu");
2434 rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
2436 fb_register_client(&early_suspend_notifier);
2437 register_reboot_notifier(&vdd_gpu_reboot_notifier);
2443 late_initcall(dvfs_init);