1 /* arch/arm/mach-rk30/rk30_dvfs.c
3 * Copyright (C) 2012 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
32 #define MHz (1000 * 1000)
33 static LIST_HEAD(rk_dvfs_tree);
34 static DEFINE_MUTEX(rk_dvfs_mutex);
35 static struct workqueue_struct *dvfs_wq;
36 static struct dvfs_node *clk_cpu_b_dvfs_node;
37 static struct dvfs_node *clk_cpu_l_dvfs_node;
38 static struct dvfs_node *clk_cpu_bl_dvfs_node;
39 static struct dvfs_node *clk_cpu_dvfs_node;
40 static struct dvfs_node *clk_gpu_dvfs_node;
41 static int pd_gpu_off, early_suspend;
42 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
43 struct regulator *vdd_gpu_regulator;
44 static DEFINE_MUTEX(temp_limit_mutex);
/*
 * dvfs_get_temp - read the SoC temperature on tsadc channel @chn.
 *
 * With CONFIG_ROCKCHIP_THERMAL enabled the current voltage of the
 * big+little CPU voltage domain is read back (under the vd mutex) and
 * handed to the tsadc driver for voltage-compensated conversion;
 * otherwise the channel is sampled directly.
 * Returns INVALID_TEMP when no valid reading is available.
 *
 * NOTE(review): extraction dropped some original lines (braces, early
 * return, #else/#endif); code below kept byte-identical.
 */
46 static int dvfs_get_temp(int chn)
48 int temp = INVALID_TEMP;
50 #if IS_ENABLED(CONFIG_ROCKCHIP_THERMAL)
53 if (clk_cpu_bl_dvfs_node == NULL ||
54 IS_ERR_OR_NULL(clk_cpu_bl_dvfs_node->vd->regulator))
/* serialize the regulator read-back with other voltage-domain users */
57 mutex_lock(&clk_cpu_bl_dvfs_node->vd->mutex);
58 read_back = dvfs_regulator_get_voltage(
59 clk_cpu_bl_dvfs_node->vd->regulator);
60 temp = rockchip_tsadc_get_temp(chn, read_back);
61 mutex_unlock(&clk_cpu_bl_dvfs_node->vd->mutex);
/* non-thermal build: sample without voltage compensation */
63 temp = rockchip_tsadc_get_temp(chn);
/*
 * pvtm_get_temp - read temperature for PVTM calibration of @dvfs_node.
 *
 * Same pattern as dvfs_get_temp() but uses the node's own voltage domain
 * for the regulator read-back. Returns INVALID_TEMP on failure.
 * NOTE(review): no vd->mutex lock is visible here, unlike dvfs_get_temp();
 * presumably callers already hold it — confirm against the full file.
 */
69 static int pvtm_get_temp(struct dvfs_node *dvfs_node, int chn)
71 int temp = INVALID_TEMP;
73 #if IS_ENABLED(CONFIG_ROCKCHIP_THERMAL)
76 if (dvfs_node == NULL ||
77 IS_ERR_OR_NULL(dvfs_node->vd->regulator))
79 read_back = dvfs_regulator_get_voltage(
80 dvfs_node->vd->regulator);
81 temp = rockchip_tsadc_get_temp(chn, read_back);
83 temp = rockchip_tsadc_get_temp(chn);
/*
 * vdd_gpu_reboot_notifier_event - reboot hook: force vdd_gpu on.
 *
 * Ensures the GPU supply is enabled before the system reboots so the
 * next boot does not start with the rail off. Enable/disable of
 * vdd_gpu_regulator is serialized by switch_vdd_gpu_mutex everywhere.
 */
90 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
91 unsigned long event, void *ptr)
95 DVFS_DBG("%s: enable vdd_gpu\n", __func__);
96 mutex_lock(&switch_vdd_gpu_mutex);
97 if (!regulator_is_enabled(vdd_gpu_regulator))
98 ret = regulator_enable(vdd_gpu_regulator);
99 mutex_unlock(&switch_vdd_gpu_mutex);
/* registered via register_reboot_notifier() elsewhere in the file */
104 static struct notifier_block vdd_gpu_reboot_notifier = {
105 .notifier_call = vdd_gpu_reboot_notifier_event,
/*
 * clk_pd_gpu_notifier_call - track the GPU power-domain clock state.
 *
 * Powers the vdd_gpu rail up before the GPU power domain is prepared and
 * powers it down again after the domain is unprepared, so the regulator
 * state follows the domain's clock prepare/unprepare lifecycle.
 */
108 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
109 unsigned long event, void *ptr)
114 case RK_CLK_PD_PREPARE:
115 mutex_lock(&switch_vdd_gpu_mutex);
118 if (!regulator_is_enabled(vdd_gpu_regulator))
119 ret = regulator_enable(vdd_gpu_regulator);
121 mutex_unlock(&switch_vdd_gpu_mutex);
123 case RK_CLK_PD_UNPREPARE:
124 mutex_lock(&switch_vdd_gpu_mutex);
127 if (regulator_is_enabled(vdd_gpu_regulator))
128 ret = regulator_disable(vdd_gpu_regulator);
130 mutex_unlock(&switch_vdd_gpu_mutex);
139 static struct notifier_block clk_pd_gpu_notifier = {
140 .notifier_call = clk_pd_gpu_notifier_call,
/*
 * early_suspend_notifier_call - framebuffer blank hook for vdd_gpu.
 *
 * On early unblank (screen coming back) the GPU rail is enabled; on
 * post-blank powerdown (screen off) it is disabled. This mirrors the
 * Android early-suspend behavior using fb notifier events.
 *
 * NOTE(review): blank_mode is assigned but the switches re-read
 * event->data directly; the local appears unused in the visible code.
 */
144 static int early_suspend_notifier_call(struct notifier_block *self,
145 unsigned long action, void *data)
147 struct fb_event *event = data;
148 int blank_mode = *((int *)event->data);
151 mutex_lock(&switch_vdd_gpu_mutex);
152 if (action == FB_EARLY_EVENT_BLANK) {
153 switch (*((int *)event->data)) {
154 case FB_BLANK_UNBLANK:
157 if (!regulator_is_enabled(vdd_gpu_regulator))
158 ret = regulator_enable(
165 } else if (action == FB_EVENT_BLANK) {
166 switch (*((int *)event->data)) {
167 case FB_BLANK_POWERDOWN:
170 if (regulator_is_enabled(vdd_gpu_regulator))
171 ret = regulator_disable(
180 mutex_unlock(&switch_vdd_gpu_mutex);
185 static struct notifier_block early_suspend_notifier = {
186 .notifier_call = early_suspend_notifier_call,
189 #define DVFS_REGULATOR_MODE_STANDBY 1
190 #define DVFS_REGULATOR_MODE_IDLE 2
191 #define DVFS_REGULATOR_MODE_NORMAL 3
192 #define DVFS_REGULATOR_MODE_FAST 4
/*
 * dvfs_regu_mode_to_string - human-readable name for a DVFS regulator
 * mode (used in sysfs/debug output). The returned string literals are
 * not visible in this extraction.
 */
194 static const char* dvfs_regu_mode_to_string(unsigned int mode)
197 case DVFS_REGULATOR_MODE_FAST:
199 case DVFS_REGULATOR_MODE_NORMAL:
201 case DVFS_REGULATOR_MODE_IDLE:
203 case DVFS_REGULATOR_MODE_STANDBY:
/*
 * dvfs_regu_mode_convert - map a DVFS_REGULATOR_MODE_* value (as used in
 * device-tree regu-mode tables) to the kernel REGULATOR_MODE_* constant.
 * Presumably returns a negative errno for unknown modes (default case
 * not visible here) — callers check for < 0.
 */
210 static int dvfs_regu_mode_convert(unsigned int mode)
213 case DVFS_REGULATOR_MODE_FAST:
214 return REGULATOR_MODE_FAST;
215 case DVFS_REGULATOR_MODE_NORMAL:
216 return REGULATOR_MODE_NORMAL;
217 case DVFS_REGULATOR_MODE_IDLE:
218 return REGULATOR_MODE_IDLE;
219 case DVFS_REGULATOR_MODE_STANDBY:
220 return REGULATOR_MODE_STANDBY;
/*
 * dvfs_regu_mode_deconvert - inverse of dvfs_regu_mode_convert(): map a
 * kernel REGULATOR_MODE_* constant back to the DVFS-local encoding.
 */
226 static int dvfs_regu_mode_deconvert(unsigned int mode)
229 case REGULATOR_MODE_FAST:
230 return DVFS_REGULATOR_MODE_FAST;
231 case REGULATOR_MODE_NORMAL:
232 return DVFS_REGULATOR_MODE_NORMAL;
233 case REGULATOR_MODE_IDLE:
234 return DVFS_REGULATOR_MODE_IDLE;
235 case REGULATOR_MODE_STANDBY:
236 return DVFS_REGULATOR_MODE_STANDBY;
/*
 * of_get_regu_mode_table - parse the "regu-mode-table" DT property into a
 * cpufreq_frequency_table.
 *
 * The property is a list of <freq_khz mode> u32 pairs; frequencies are
 * stored in Hz-scaled units (*1000) in .frequency and the mode in .index.
 * The last pair must have frequency 0 (sentinel check below). The table
 * is terminated with CPUFREQ_TABLE_END and must be kfree()d by the
 * caller. Returns ERR_PTR(-ENOMEM) on allocation failure; other error
 * paths are not visible in this extraction.
 */
242 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
244 struct cpufreq_frequency_table *regu_mode_table = NULL;
245 const struct property *prop;
249 prop = of_find_property(dev_node, "regu-mode-table", NULL);
255 nr = prop->length / sizeof(u32);
/* entries come in pairs, so an odd count is malformed */
257 pr_err("%s: Invalid freq list\n", __func__);
261 regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
262 (nr/2+1), GFP_KERNEL);
263 if (!regu_mode_table) {
264 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
265 return ERR_PTR(-ENOMEM);
270 for (i=0; i<nr/2; i++){
271 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
272 regu_mode_table[i].index = be32_to_cpup(val++);
/* the DT list must end with a 0 frequency acting as "any lower rate" */
275 if (regu_mode_table[i-1].frequency != 0) {
276 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
277 kfree(regu_mode_table);
281 regu_mode_table[i].index = 0;
282 regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
284 return regu_mode_table;
/*
 * dvfs_regu_mode_table_constrain - validate the node's regu_mode_table
 * against what the regulator actually supports.
 *
 * Each table entry's mode is converted to a REGULATOR_MODE_* value and
 * checked with regulator_is_supported_mode(); if any mode is unsupported
 * the whole table is dropped (freed and NULLed). Modes that the check
 * rounds to a different supported value are written back into the table.
 */
287 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
290 int mode, convert_mode, valid_mode;
295 if (!clk_dvfs_node->regu_mode_table)
298 if (!clk_dvfs_node->vd)
301 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
304 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
305 mode = clk_dvfs_node->regu_mode_table[i].index;
306 convert_mode = dvfs_regu_mode_convert(mode);
308 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
311 DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
/* unsupported mode: discard the whole table rather than run with it */
313 kfree(clk_dvfs_node->regu_mode_table);
314 clk_dvfs_node->regu_mode_table = NULL;
318 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
319 if (valid_mode != mode) {
320 DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
321 __func__, mode, valid_mode);
322 clk_dvfs_node->regu_mode_table[i].index = valid_mode;
/*
 * clk_dvfs_node_get_regu_mode - look up the regulator mode for @rate.
 * Scans regu_mode_table and stores the first entry whose frequency is
 * <= rate into *mode (the table is presumably ordered highest-first,
 * with the 0-frequency sentinel catching all lower rates — confirm
 * against of_get_regu_mode_table()).
 */
330 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
331 unsigned long rate, unsigned int *mode)
336 if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
339 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
340 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
341 *mode = clk_dvfs_node->regu_mode_table[i].index;
/*
 * dvfs_pd_get_newmode_byclk - compute the regulator mode a power domain
 * needs, given that @clk_dvfs_node just changed its mode.
 *
 * Fast path: if the changed clock is enabled and already demands at least
 * the domain's current mode, its mode wins. Otherwise take the maximum
 * mode over all mode-enabled clocks in the domain.
 */
349 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
351 unsigned int mode_max = 0;
354 if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
355 return clk_dvfs_node->regu_mode;
/* note: reuses clk_dvfs_node as the list cursor from here on */
358 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
359 if (clk_dvfs_node->regu_mode_en)
360 mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
/*
 * Mode propagation helpers: clock -> power domain -> voltage domain.
 * dvfs_update_clk_pds_mode() refreshes the owning pd's mode after a
 * clock's mode changed.
 */
366 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
373 pd = clk_dvfs_node->pd;
377 pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
/* dvfs_vd_get_newmode_bypd: max mode over all pds in the voltage domain */
380 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
382 unsigned int mode_max_vd = 0;
388 list_for_each_entry(pd, &vd->pd_list, node) {
389 mode_max_vd = max(mode_max_vd, pd->regu_mode);
/* dvfs_vd_get_newmode_byclk: full clock->pd->vd recomputation */
395 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
400 dvfs_update_clk_pds_mode(clk_dvfs_node);
402 return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
/*
 * dvfs_regu_set_mode - apply a DVFS mode to the voltage domain's
 * regulator. Converts the DVFS-local mode to REGULATOR_MODE_*, calls
 * regulator_set_mode(), and caches the new mode in vd->regu_mode on
 * success.
 */
405 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
411 if (IS_ERR_OR_NULL(vd)) {
412 DVFS_ERR("%s: vd_node error\n", __func__);
416 DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
418 convert_mode = dvfs_regu_mode_convert(mode);
419 if (convert_mode < 0) {
420 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
424 if (!IS_ERR_OR_NULL(vd->regulator)) {
425 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
427 DVFS_ERR("%s: %s set mode %d 	(was %d) error!\n", __func__,
428 vd->regulator_name, mode, vd->regu_mode);
432 DVFS_ERR("%s: invalid regulator\n", __func__);
/* only record the mode once the hardware accepted it */
436 vd->regu_mode = mode;
/*
 * dvfs_regu_mode_target - on a rate change, pick and apply the regulator
 * mode matching @rate for this clock, then propagate the aggregated mode
 * through pd and vd and program the regulator. No-op when the node has
 * regu_mode_en cleared.
 */
441 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
450 if (!clk_dvfs_node->regu_mode_en)
453 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
455 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
456 __func__, clk_dvfs_node->name, rate);
459 clk_dvfs_node->regu_mode = mode;
461 mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
465 ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
/*
 * dvfs_volt_up_delay - busy-wait until the regulator output has ramped
 * up from @old_volt to @new_volt (uV). No delay is needed when the
 * voltage is not rising. The delay is taken from
 * regulator_set_voltage_time() when the regulator reports ramp times;
 * otherwise it falls back to a fixed estimate.
 */
470 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
474 if(new_volt <= old_volt)
476 if(vd->volt_time_flag > 0)
477 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
480 if(u_time < 0) {/* regulator does not report ramp time; use a default estimate */
481 DVFS_DBG("%s:vd %s is not suported getting delay time,so we use default\n",
/* delta_uV >> 9 ~= delta/512, i.e. roughly 2 uV per us of ramp */
483 u_time = ((new_volt) - (old_volt)) >> 9;
486 DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n",
487 __func__, vd->name, old_volt, new_volt, u_time);
/* long ramps: sleep whole ms then spin the remainder, and warn */
489 if (u_time >= 1000) {
490 mdelay(u_time / 1000);
491 udelay(u_time % 1000);
492 DVFS_WARNING("%s: regulator set vol delay is larger 1ms,old is %d,new is %d\n",
493 __func__, old_volt, new_volt);
/*
 * dvfs_regulator_set_voltage_readback - set the regulator voltage and,
 * on reported failure, read the output back to distinguish "set failed
 * but took effect" from a genuine failure.
 *
 * NOTE(review): min_uV is accepted but ignored — the call passes max_uV
 * for both bounds. Confirm this is intentional before relying on it.
 */
499 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
501 int ret = 0, read_back = 0;
503 ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
505 DVFS_ERR("%s: now read back to check voltage\n", __func__);
507 /* read back to judge if it is already effect */
509 read_back = dvfs_regulator_get_voltage(regulator);
510 if (read_back == max_uV) {
511 DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
514 DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
/*
 * dvfs_scale_volt_direct - program the voltage domain to @volt_new (uV)
 * in a single step, wait for the up-ramp, and update the cached
 * cur_volt / volt_set_flag on success.
 */
521 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
525 DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
527 if (IS_ERR_OR_NULL(vd_clk)) {
528 DVFS_ERR("%s: vd_node error\n", __func__);
532 if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
533 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
534 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
/* failure: remember it so later scaling can attempt recovery */
536 vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
537 DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
538 __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
543 DVFS_ERR("%s: invalid regulator\n", __func__);
547 vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
548 vd_clk->cur_volt = volt_new;
/*
 * dvfs_reset_volt - recover the cached voltage after a failed set:
 * read the regulator's actual output and reload vd->cur_volt from it.
 * Returns the reloaded voltage; error return for an invalid read is not
 * visible in this extraction.
 */
554 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
556 int flag_set_volt_correct = 0;
557 if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
558 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
560 DVFS_ERR("%s: invalid regulator\n", __func__);
563 if (flag_set_volt_correct <= 0) {
564 DVFS_ERR("%s (vd:%s), try to reload volt ,by it is error again(%d)!!! stop scaling\n",
565 __func__, dvfs_vd->name, flag_set_volt_correct);
568 dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
569 DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
570 __func__, dvfs_vd->name, flag_set_volt_correct);
572 /* Reset vd's voltage */
573 dvfs_vd->cur_volt = flag_set_volt_correct;
575 return dvfs_vd->cur_volt;
579 // Refresh the vd's regulator info when a DVFS clock is enabled:
// cache the current output voltage and mark the set-flag accordingly.
580 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
582 vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
583 if(vd->cur_volt <= 0){
584 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
586 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/*
 * dvfs_get_vd_regulator_volt_list - snapshot the regulator's selectable
 * voltages into vd->volt_list (capped at VD_VOL_LIST_CNT entries).
 * Non-positive list entries are skipped, so volt_list is compacted to n
 * valid ascending selector voltages.
 */
589 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
591 unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
592 int n = 0, sel_volt = 0;
594 if(selector > VD_VOL_LIST_CNT)
595 selector = VD_VOL_LIST_CNT;
597 for (i = 0; i < selector; i++) {
598 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
600 //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
601 // __func__, vd->name, i, sel_volt);
604 vd->volt_list[n++] = sel_volt;
605 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n",
606 __func__, vd->name, i, n, sel_volt);
/*
 * Voltage rounding against the cached vd->volt_list.
 * _max: round @volt up to the nearest supported voltage (>= volt);
 * _min: round down to the nearest supported voltage (<= volt);
 * vd_regulator_round_volt() dispatches on VD_LIST_RELATION_L/_H.
 * (Loop bodies are partially missing in this extraction.)
 */
613 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
618 for (i = 0; i < vd->n_voltages; i++) {
619 sel_volt = vd->volt_list[i];
621 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
632 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
637 for (i = 0; i < vd->n_voltages; i++) {
638 sel_volt = vd->volt_list[i];
640 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
/* first entry above volt found: previous entry is the round-down result */
646 return vd->volt_list[i-1];
656 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
660 if(flags == VD_LIST_RELATION_L)
661 return vd_regulator_round_volt_min(vd, volt);
663 return vd_regulator_round_volt_max(vd, volt);
/*
 * dvfs_table_round_volt - round every voltage in the node's dvfs_table
 * (stored in .index, uV) up to a value the regulator can actually
 * produce (VD_LIST_RELATION_H = round up). Entries that cannot be
 * rounded are logged.
 */
666 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
670 if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd ||
671 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
674 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
676 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
679 DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
680 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
683 DVFS_DBG("clk %s:round_volt %d to %d\n",
684 clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
686 clk_dvfs_node->dvfs_table[i].index=test_volt;
/*
 * dvfs_vd_get_regulator_volt_time_info - probe whether the regulator can
 * report voltage ramp time. A trial query for a +200mV step is cached in
 * vd->volt_time_flag (>0: supported, <0: unsupported; dvfs_volt_up_delay()
 * falls back to an estimate in that case).
 */
690 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
692 if(vd->volt_time_flag <= 0){/* not probed yet (or previously unsupported) */
693 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
694 if(vd->volt_time_flag < 0){
695 DVFS_DBG("%s,vd %s volt_time is no support\n",
699 DVFS_DBG("%s,vd %s volt_time is support,up 200mv need delay %d us\n",
700 __func__, vd->name, vd->volt_time_flag);
/*
 * dvfs_vd_get_regulator_mode_info - probe whether the regulator supports
 * mode get/set. Reads the current mode; if it is one of the four known
 * REGULATOR_MODE_* values, verify it can be written back. vd->mode_flag
 * caches the result (>0: supported mode value, 0: retry/unsupported).
 */
705 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
707 //REGULATOR_MODE_FAST
708 if(vd->mode_flag <= 0){/* not probed yet; try to detect mode support */
709 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
710 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
711 || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
/* round-trip check: a failing set means mode control is unusable */
713 if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
714 vd->mode_flag = 0;/* re-probe on next call */
717 if(vd->mode_flag > 0){
718 DVFS_DBG("%s,vd %s mode(now is %d) support\n",
719 __func__, vd->name, vd->mode_flag);
722 DVFS_DBG("%s,vd %s mode is not support now check\n",
/*
 * dvfs_get_regulator - find a registered voltage domain's regulator by
 * its name. Walks the global rk_dvfs_tree under rk_dvfs_mutex; returns
 * the regulator on match (NULL path not visible in this extraction).
 * Public API (non-static).
 */
729 struct regulator *dvfs_get_regulator(char *regulator_name)
733 mutex_lock(&rk_dvfs_mutex);
734 list_for_each_entry(vd, &rk_dvfs_tree, node) {
735 if (strcmp(regulator_name, vd->regulator_name) == 0) {
736 mutex_unlock(&rk_dvfs_mutex);
737 return vd->regulator;
740 mutex_unlock(&rk_dvfs_mutex);
/*
 * dvfs_get_rate_range - derive min_rate/max_rate (Hz) from the node's
 * dvfs_table (kHz entries). The loop assignments suggest the last entry
 * sets max_rate and the first sets min_rate, but the guarding lines are
 * missing from this extraction — confirm against the full file.
 */
744 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
746 struct cpufreq_frequency_table *table;
752 clk_dvfs_node->min_rate = 0;
753 clk_dvfs_node->max_rate = 0;
755 table = clk_dvfs_node->dvfs_table;
756 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
/* kHz -> Hz, truncating any sub-MHz flag digits encoded in the table */
757 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
759 clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
762 DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
763 __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * dvfs_table_round_clk_rate - replace each dvfs_table frequency with the
 * rate the clock framework will actually deliver.
 *
 * Table frequencies are kHz values whose last three digits can encode a
 * flag (DDR encoding: rate*1000 + flags). The flag digits are split off,
 * the base rate is rounded through __clk_round_rate(), rounded up to a
 * whole MHz, and the flags re-attached.
 */
768 static void dvfs_table_round_clk_rate(struct dvfs_node *clk_dvfs_node)
770 int i, rate, temp_rate, flags;
772 if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
775 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
776 //ddr rate = real rate+flags
777 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
778 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
779 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
781 DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
782 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
786 /* Set rate unit as MHZ */
787 if (temp_rate % MHz != 0)
788 temp_rate = (temp_rate / MHz + 1) * MHz;
790 temp_rate = (temp_rate / 1000) + flags;
792 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
793 clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
795 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;
/*
 * clk_dvfs_node_get_ref_volt - find the dvfs_table entry covering
 * @rate_khz and copy its frequency/voltage pair into @clk_fv.
 * The table is scanned in ascending order; the first entry with
 * frequency >= rate_khz wins. On out-of-range, clk_fv->frequency is
 * zeroed (error return value not visible in this extraction).
 */
799 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
800 struct cpufreq_frequency_table *clk_fv)
804 if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
808 clk_fv->frequency = rate_khz;
811 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
812 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
813 clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
814 clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
815 //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
816 //clk_fv->frequency, clk_fv->index);
/* no table entry can supply this rate */
820 clk_fv->frequency = 0;
822 //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
/*
 * Voltage propagation helpers: clock -> power domain -> voltage domain.
 * These mirror the *_newmode_* helpers above but aggregate set_volt /
 * cur_volt instead of regulator modes.
 */
/* dvfs_pd_get_newvolt_byclk: domain voltage = max demand of enabled clocks,
 * with a fast path when the changed clock already dominates. */
826 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
830 if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
831 return clk_dvfs_node->set_volt;
834 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
835 if (clk_dvfs_node->enable_count)
836 volt_max = max(volt_max, clk_dvfs_node->set_volt);
/* dvfs_update_clk_pds_volt: refresh the owning pd's cur_volt */
841 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
848 pd = clk_dvfs_node->pd;
852 pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
/* dvfs_vd_get_newvolt_bypd: vd voltage = max over its power domains */
855 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
863 list_for_each_entry(pd, &vd->pd_list, node) {
864 volt_max_vd = max(volt_max_vd, pd->cur_volt);
/* dvfs_vd_get_newvolt_byclk: full clock->pd->vd recomputation */
870 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
875 dvfs_update_clk_pds_volt(clk_dvfs_node);
876 return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
/*
 * dvfs_temp_limit_work_func (variant 1) - periodic (100ms) worker that
 * walks every vd/pd/clock, refreshes each temp-limited clock's cached
 * temperature and re-runs its DVFS target at the last requested rate so
 * thermal limits are re-applied. Requeues itself on CPU0 first.
 *
 * NOTE(review): a second function with this same name appears later in
 * the file; presumably the two are selected by preprocessor conditionals
 * not visible in this extraction — confirm before editing either.
 */
880 static void dvfs_temp_limit_work_func(struct work_struct *work)
882 unsigned long delay = HZ / 10; /* 100ms poll period */
885 struct dvfs_node *clk_dvfs_node;
887 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
889 mutex_lock(&rk_dvfs_mutex);
890 list_for_each_entry(vd, &rk_dvfs_tree, node) {
891 mutex_lock(&vd->mutex);
892 list_for_each_entry(pd, &vd->pd_list, node) {
893 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
894 if (clk_dvfs_node->temp_limit_table) {
895 clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
896 clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
900 mutex_unlock(&vd->mutex);
902 mutex_unlock(&rk_dvfs_mutex);
/*
 * PVTM (Process-Voltage-Temperature Monitor) calibration data.
 *
 * Each *_pvtm_table maps a CPU frequency (kHz, .frequency) to the
 * minimum PVTM count (.index) required to run it; each paired pvtm_info
 * adds per-chip calibration: scan rate, sample time, voltage step and
 * margin, PVTM deltas per volt-step/degree, and the allowed voltage
 * window. Tables exist per RK3288 process revision (v0/v1/v2) and for
 * the RK3368 big/little clusters; pvtm_info_table collects them for
 * lookup by compatible + process version in pvtm_set_dvfs_table().
 */
906 static struct cpufreq_frequency_table rk3288v0_arm_pvtm_table[] = {
907 {.frequency = 216000, .index = 4006},
908 {.frequency = 408000, .index = 6518},
909 {.frequency = 600000, .index = 8345},
910 {.frequency = 816000, .index = 11026},
911 {.frequency = 1008000, .index = 12906},
912 {.frequency = 1200000, .index = 15532},
913 {.frequency = 1416000, .index = 18076},
914 {.frequency = 1608000, .index = 21282},
915 {.frequency = CPUFREQ_TABLE_END, .index = 1},
918 static struct pvtm_info rk3288v0_arm_pvtm_info = {
919 .compatible = "rockchip,rk3288",
920 .pvtm_table = rk3288v0_arm_pvtm_table,
921 .channel = ARM_DVFS_CH,
922 .process_version = RK3288_PROCESS_V0,
923 .scan_rate_hz = 216000000,
924 .sample_time_us = 1000,
925 .volt_step_uv = 12500,
926 .delta_pvtm_by_volt = 400,
927 .delta_pvtm_by_temp = 14,
928 .volt_margin_uv = 25000,
929 .min_volt_uv = 850000,
930 .max_volt_uv = 1400000,
934 static struct cpufreq_frequency_table rk3288v1_arm_pvtm_table[] = {
935 {.frequency = 216000, .index = 4710},
936 {.frequency = 408000, .index = 7200},
937 {.frequency = 600000, .index = 9192},
938 {.frequency = 816000, .index = 12560},
939 {.frequency = 1008000, .index = 14741},
940 {.frequency = 1200000, .index = 16886},
941 {.frequency = 1416000, .index = 20081},
942 {.frequency = 1608000, .index = 24061},
943 {.frequency = CPUFREQ_TABLE_END, .index = 1},
946 static struct pvtm_info rk3288v1_arm_pvtm_info = {
947 .compatible = "rockchip,rk3288",
948 .pvtm_table = rk3288v1_arm_pvtm_table,
949 .channel = ARM_DVFS_CH,
950 .process_version = RK3288_PROCESS_V1,
951 .scan_rate_hz = 216000000,
952 .sample_time_us = 1000,
953 .volt_step_uv = 12500,
954 .delta_pvtm_by_volt = 450,
955 .delta_pvtm_by_temp = 7,
956 .volt_margin_uv = 25000,
957 .min_volt_uv = 850000,
958 .max_volt_uv = 1400000,
962 static struct cpufreq_frequency_table rk3288v2_arm_pvtm_table[] = {
963 {.frequency = 216000, .index = 5369},
964 {.frequency = 408000, .index = 6984},
965 {.frequency = 600000, .index = 8771},
966 {.frequency = 816000, .index = 11434},
967 {.frequency = 1008000, .index = 14178},
968 {.frequency = 1200000, .index = 16797},
969 {.frequency = 1416000, .index = 20178},
970 {.frequency = 1608000, .index = 23303},
971 {.frequency = CPUFREQ_TABLE_END, .index = 1},
974 static struct pvtm_info rk3288v2_arm_pvtm_info = {
975 .compatible = "rockchip,rk3288",
976 .pvtm_table = rk3288v2_arm_pvtm_table,
977 .channel = ARM_DVFS_CH,
978 .process_version = RK3288_PROCESS_V2,
979 .scan_rate_hz = 216000000,
980 .sample_time_us = 1000,
981 .volt_step_uv = 12500,
982 .delta_pvtm_by_volt = 430,
983 .delta_pvtm_by_temp = 12,
984 .volt_margin_uv = 25000,
985 .min_volt_uv = 900000,
986 .max_volt_uv = 1400000,
990 static struct cpufreq_frequency_table rk3368v0_arm_b_pvtm_table[] = {
991 {.frequency = 216000, .index = 9891},
992 {.frequency = 312000, .index = 9891},
993 {.frequency = 408000, .index = 9891},
994 {.frequency = 600000, .index = 9891},
995 {.frequency = 696000, .index = 10115},
996 {.frequency = 816000, .index = 11014},
997 {.frequency = 1008000, .index = 13650},
998 {.frequency = 1200000, .index = 16520},
999 {.frequency = 1296000, .index = 17856},
1000 {.frequency = 1416000, .index = 19662},
1001 {.frequency = 1512000, .index = 21069},
1002 {.frequency = CPUFREQ_TABLE_END, .index = 1},
1005 static struct pvtm_info rk3368v0_arm_b_pvtm_info = {
1006 .compatible = "rockchip,rk3368",
1007 .pvtm_table = rk3368v0_arm_b_pvtm_table,
1008 .channel = ARM_DVFS_CH,
1009 .process_version = 0,
1010 .scan_rate_hz = 216000000,
1011 .sample_time_us = 1000,
1012 .volt_step_uv = 12500,
1013 .delta_pvtm_by_volt = 350,
1014 .delta_pvtm_by_temp = 12,
1015 .volt_margin_uv = 50000,
1016 .min_volt_uv = 925000,
1017 .max_volt_uv = 1375000,
1021 static struct cpufreq_frequency_table rk3368v0_arm_l_pvtm_table[] = {
1022 {.frequency = 216000, .index = 9913},
1023 {.frequency = 312000, .index = 9913},
1024 {.frequency = 408000, .index = 9913},
1025 {.frequency = 600000, .index = 9913},
1026 {.frequency = 696000, .index = 11056},
1027 {.frequency = 816000, .index = 12816},
1028 {.frequency = 1008000, .index = 15613},
1029 {.frequency = 1200000, .index = 18329},
1030 {.frequency = CPUFREQ_TABLE_END, .index = 1},
1033 static struct pvtm_info rk3368v0_arm_l_pvtm_info = {
1034 .compatible = "rockchip,rk3368",
1035 .pvtm_table = rk3368v0_arm_l_pvtm_table,
1036 .channel = ARM_DVFS_CH,
1037 .process_version = 0,
1038 .scan_rate_hz = 216000000,
1039 .sample_time_us = 1000,
1040 .volt_step_uv = 12500,
1041 .delta_pvtm_by_volt = 350,
1042 .delta_pvtm_by_temp = 12,
1043 .volt_margin_uv = 50000,
1044 .min_volt_uv = 925000,
1045 .max_volt_uv = 1375000,
/* lookup table of all known calibrations, matched at probe time */
1050 static struct pvtm_info *pvtm_info_table[] = {
1051 &rk3288v0_arm_pvtm_info,
1052 &rk3288v1_arm_pvtm_info,
1053 &rk3288v2_arm_pvtm_info,
1054 &rk3368v0_arm_b_pvtm_info,
1055 &rk3368v0_arm_l_pvtm_info
/*
 * pvtm_set_single_dvfs - calibrate the voltage for one dvfs_table entry.
 *
 * Computes a target PVTM count = min_pvtm + temperature compensation +
 * a margin derived from volt_margin_uv (converted to PVTM units via
 * delta_pvtm_by_volt per volt_step_uv), then picks the first measured
 * voltage in pvtm_list whose count reaches the target and stores it in
 * dvfs_table[idx].index. Temperature channel 1 is used on RK3288,
 * channel 0 otherwise, clamped below by pvtm_min_temp.
 */
1058 static int pvtm_set_single_dvfs(struct dvfs_node *dvfs_node, u32 idx,
1059 struct pvtm_info *info, int *pvtm_list,
1062 struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
1063 struct cpufreq_frequency_table *pvtm_table = dvfs_node->pvtm_table;
1064 int target_pvtm, pvtm_margin, volt_margin;
1065 unsigned int n_voltages = dvfs_node->vd->n_voltages;
1066 int *volt_list = dvfs_node->vd->volt_list;
/* total margin = static info margin + per-entry margin from pvtm_table */
1069 volt_margin = info->volt_margin_uv + pvtm_table[idx].index;
1070 n = volt_margin/info->volt_step_uv;
1071 if (volt_margin%info->volt_step_uv)
1074 pvtm_margin = n*info->delta_pvtm_by_volt;
1075 if (cpu_is_rk3288())
1076 temp = pvtm_get_temp(dvfs_node, 1);
1078 temp = pvtm_get_temp(dvfs_node, 0);
1080 if (temp < dvfs_node->pvtm_min_temp || temp == INVALID_TEMP)
1081 temp = dvfs_node->pvtm_min_temp;
1083 target_pvtm = min_pvtm+temp * info->delta_pvtm_by_temp + pvtm_margin;
1085 DVFS_DBG("=====%s: temp:%d, freq:%d, target pvtm:%d=====\n",
1086 __func__, temp, dvfs_table[idx].frequency, target_pvtm);
/* volt_list/pvtm_list are ascending: first hit is the lowest safe voltage */
1088 for (n = 0; n < n_voltages; n++) {
1089 if (pvtm_list[n] >= target_pvtm) {
1090 dvfs_table[idx].index = volt_list[n];
1091 DVFS_DBG("freq[%d]=%d, volt=%d\n",
1092 idx, dvfs_table[idx].frequency, volt_list[n]);
/*
 * pvtm_set_dvfs_table - rebuild the node's freq/volt table from live
 * PVTM measurements.
 *
 * Runs the clock at the calibration scan rate, sweeps every supported
 * voltage inside [min_volt_uv, max_volt_uv] sampling the PVTM counter at
 * each step, then for every dvfs_table frequency finds the matching
 * pvtm_table minimum count and calls pvtm_set_single_dvfs() to pick the
 * voltage. Frequencies that cannot reach their target count cap
 * max_limit_freq at the previous entry.
 */
1103 static void pvtm_set_dvfs_table(struct dvfs_node *dvfs_node)
1105 struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
1106 struct pvtm_info *info = dvfs_node->pvtm_info;
1107 struct regulator *regulator = dvfs_node->vd->regulator;
1110 int pvtm_list[VD_VOL_LIST_CNT] = {0};
1111 unsigned int n_voltages = dvfs_node->vd->n_voltages;
1112 int *volt_list = dvfs_node->vd->volt_list;
1117 clk_set_rate(dvfs_node->clk, info->scan_rate_hz);
1118 DVFS_DBG("%s:%lu\n", __func__, clk_get_rate(dvfs_node->clk));
/* sweep each usable voltage and record the PVTM count there */
1120 for (i = 0; i < n_voltages; i++) {
1121 if ((volt_list[i] >= info->min_volt_uv) &&
1122 (volt_list[i] <= info->max_volt_uv)) {
1123 regulator_set_voltage(regulator, volt_list[i],
1125 pvtm_list[i] = pvtm_get_value(info->channel,
1126 info->sample_time_us);
1130 for (i = 0; dvfs_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1131 for (j = 0; info->pvtm_table[j].frequency !=
1132 CPUFREQ_TABLE_END; j++)
1133 if (info->pvtm_table[j].frequency >=
1134 dvfs_table[i].frequency) {
1135 int min_pvtm = info->pvtm_table[j].index;
1137 ret = pvtm_set_single_dvfs(dvfs_node,
/* entry unreachable at any voltage: cap max freq at the previous entry */
1146 dvfs_node->max_limit_freq =
1147 dvfs_table[i-1].frequency * 1000;
1148 DVFS_WARNING("freq: %d can not reach target pvtm\n",
1149 dvfs_table[i].frequency);
1150 DVFS_WARNING("max freq: %d\n",
1151 dvfs_node->max_limit_freq);
1155 if (info->pvtm_table[j].frequency == CPUFREQ_TABLE_END) {
1156 DVFS_WARNING("not support freq :%d, max freq is %d\n",
1157 dvfs_table[i].frequency,
1158 info->pvtm_table[j-1].frequency);
/*
 * dvfs_virt_temp_limit_work_func - "virtual" thermal limiting for chips
 * without a usable temp sensor (RK312x only; everyone else returns
 * early).
 *
 * Estimates the number of busy CPUs from the change in aggregate idle
 * time since the last invocation (with a +6.25% slack on idle), selects
 * the matching virt_temp_limit_table, walks it against target_temp to
 * pick a rate cap, and re-applies the last requested rate if the cap
 * changed. Performance mode bypasses limiting (tracked via in_perf).
 * Static locals keep the idle-time baseline across calls.
 */
1164 static void dvfs_virt_temp_limit_work_func(struct dvfs_node *dvfs_node)
1166 const struct cpufreq_frequency_table *limits_table = NULL;
1167 unsigned int new_temp_limit_rate = -1;
1168 unsigned int nr_cpus = num_online_cpus();
1169 static bool in_perf;
1172 if (!cpu_is_rk312x())
1175 if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
1177 } else if (in_perf) {
1180 static u64 last_time_in_idle;
1181 static u64 last_time_in_idle_timestamp;
1182 u64 time_in_idle = 0, now;
1185 unsigned cpu, busy_cpus;
1187 for_each_online_cpu(cpu) {
1188 time_in_idle += get_cpu_idle_time_us(cpu, &now);
1190 delta_time = now - last_time_in_idle_timestamp;
1191 delta_idle = time_in_idle - last_time_in_idle;
1192 last_time_in_idle = time_in_idle;
1193 last_time_in_idle_timestamp = now;
1194 delta_idle += delta_time >> 4; /* +6.25% */
/* classify how many CPUs were effectively busy over the interval */
1195 if (delta_idle > (nr_cpus - 1)
1196 * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
1198 else if (delta_idle > (nr_cpus - 2) * delta_time)
1200 else if (delta_idle > (nr_cpus - 3) * delta_time)
1205 limits_table = dvfs_node->virt_temp_limit_table[busy_cpus-1];
1206 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
1207 delta_time, delta_idle, nr_cpus, busy_cpus);
1211 new_temp_limit_rate = limits_table[0].frequency;
1212 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1213 if (dvfs_node->target_temp >=
1214 limits_table[i].index)
1215 new_temp_limit_rate = limits_table[i].frequency;
1219 if (dvfs_node->temp_limit_rate != new_temp_limit_rate) {
1220 dvfs_node->temp_limit_rate = new_temp_limit_rate;
1221 dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
1222 DVFS_DBG("temp_limit_rate:%d\n",
1223 (int)dvfs_node->temp_limit_rate);
/*
 * dvfs_temp_limit_performance - thermal cap for PERFORMANCE policy:
 * start from max_rate and walk per_temp_limit_table, keeping the rate of
 * the last entry whose threshold (.index, degrees) is below @temp; then
 * re-apply the last requested rate so the new cap takes effect.
 */
1227 static void dvfs_temp_limit_performance(struct dvfs_node *dvfs_node, int temp)
1231 dvfs_node->temp_limit_rate = dvfs_node->max_rate;
1232 for (i = 0; dvfs_node->per_temp_limit_table[i].frequency !=
1233 CPUFREQ_TABLE_END; i++) {
1234 if (temp > dvfs_node->per_temp_limit_table[i].index)
1235 dvfs_node->temp_limit_rate =
1236 dvfs_node->per_temp_limit_table[i].frequency;
1238 dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
/*
 * dvfs_temp_limit_normal - proportional thermal governor for NORMAL
 * policy.
 *
 * Above target_temp (and still heating): look up a rate step for the
 * overshoot in nor_temp_limit_table and lower temp_limit_rate by that
 * step, clamped at min_temp_limit. Below target: look up a step for the
 * undershoot and raise temp_limit_rate, clamped at max_rate. Either way
 * the last requested rate is re-applied so the new cap takes effect.
 */
1241 static void dvfs_temp_limit_normal(struct dvfs_node *dvfs_node, int temp)
1244 unsigned long arm_rate_step = 0;
1247 if (temp > dvfs_node->target_temp) {
/* only throttle further while temperature is still rising */
1248 if (temp > dvfs_node->old_temp) {
1249 delta_temp = temp - dvfs_node->target_temp;
1251 dvfs_node->nor_temp_limit_table[i].frequency !=
1252 CPUFREQ_TABLE_END; i++) {
1254 dvfs_node->nor_temp_limit_table[i].index)
1256 dvfs_node->nor_temp_limit_table[i].frequency;
1258 if (arm_rate_step &&
1259 (dvfs_node->temp_limit_rate > arm_rate_step)) {
1260 dvfs_node->temp_limit_rate -= arm_rate_step;
1261 if (dvfs_node->temp_limit_rate <
1262 dvfs_node->min_temp_limit)
1263 dvfs_node->temp_limit_rate =
1264 dvfs_node->min_temp_limit;
1265 dvfs_clk_set_rate(dvfs_node,
1266 dvfs_node->last_set_rate);
/* cool side: gradually release the cap back toward max_rate */
1270 if (dvfs_node->temp_limit_rate < dvfs_node->max_rate) {
1271 delta_temp = dvfs_node->target_temp - temp;
1273 dvfs_node->nor_temp_limit_table[i].frequency !=
1274 CPUFREQ_TABLE_END; i++) {
1276 dvfs_node->nor_temp_limit_table[i].index)
1278 dvfs_node->nor_temp_limit_table[i].frequency;
1281 if (arm_rate_step) {
1282 dvfs_node->temp_limit_rate += arm_rate_step;
1283 if (dvfs_node->temp_limit_rate >
1284 dvfs_node->max_rate)
1285 dvfs_node->temp_limit_rate =
1286 dvfs_node->max_rate;
1287 dvfs_clk_set_rate(dvfs_node,
1288 dvfs_node->last_set_rate);
/*
 * dvfs_temp_limit - dispatch thermal limiting for one node based on the
 * current PM policy. Changes of <= 1 degree are ignored as hysteresis.
 * PERFORMANCE policy uses the absolute per_temp table; NORMAL policy the
 * proportional nor_temp table. old_temp is updated for the next poll.
 */
1294 static void dvfs_temp_limit(struct dvfs_node *dvfs_node, int temp)
1299 delta_temp = (dvfs_node->old_temp > temp) ? (dvfs_node->old_temp-temp) :
1300 (temp-dvfs_node->old_temp);
1301 if (delta_temp <= 1)
1304 if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
1305 if (!dvfs_node->per_temp_limit_table)
1307 dvfs_temp_limit_performance(dvfs_node, temp);
1308 } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
1309 if (!dvfs_node->nor_temp_limit_table)
1311 dvfs_temp_limit_normal(dvfs_node, temp);
1313 dvfs_node->old_temp = temp;
1314 DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n",
1315 temp, dvfs_node->temp_limit_rate);
/*
 * dvfs_temp_limit_work_func() - periodic worker (every HZ/10) that samples
 * the thermal channels and throttles each dvfs node that has temperature
 * limiting enabled. Channel mapping visible here: 0 = big/little CPU,
 * 1 = single CPU cluster, 2 = GPU.
 */
1318 static void dvfs_temp_limit_work_func(struct work_struct *work)
1320 unsigned long delay = HZ/10;
1321 int temp = INVALID_TEMP;
/* Re-arm first, so the work keeps running regardless of what follows. */
1323 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
1325 mutex_lock(&temp_limit_mutex);
1326 if (clk_cpu_b_dvfs_node &&
1327 clk_cpu_b_dvfs_node->temp_limit_enable == 1) {
1328 temp = dvfs_get_temp(0);
1329 if (temp != INVALID_TEMP)
1330 dvfs_temp_limit(clk_cpu_b_dvfs_node, temp);
/* Little cluster shares channel 0; reuse the sample when already valid. */
1332 if (clk_cpu_l_dvfs_node &&
1333 clk_cpu_l_dvfs_node->temp_limit_enable == 1) {
1334 if (temp == INVALID_TEMP)
1335 temp = dvfs_get_temp(0);
1336 if (temp != INVALID_TEMP)
1337 dvfs_temp_limit(clk_cpu_l_dvfs_node, temp);
/* Single-cluster CPU: fall back to the virtual (load-based) limiter when
 * the sensor read fails. */
1339 if (clk_cpu_dvfs_node &&
1340 clk_cpu_dvfs_node->temp_limit_enable == 1) {
1341 temp = dvfs_get_temp(1);
1342 if (temp == INVALID_TEMP)
1343 dvfs_virt_temp_limit_work_func(clk_cpu_dvfs_node);
1345 dvfs_temp_limit(clk_cpu_dvfs_node, temp);
1347 if (clk_gpu_dvfs_node &&
1348 clk_gpu_dvfs_node->temp_limit_enable == 1) {
1349 temp = dvfs_get_temp(2);
1350 if (temp != INVALID_TEMP)
1351 dvfs_temp_limit(clk_gpu_dvfs_node, temp);
1353 mutex_unlock(&temp_limit_mutex);
/* Delayed work queued on dvfs_wq from dvfs_init(); self re-arms each run. */
1355 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
/*
 * dvfs_clk_enable_limit() - clamp a dvfs node to [min_rate, max_rate] and
 * immediately re-target the clock so the limit takes effect.
 * Returns 0 on success (visible paths); error handling lines are missing
 * from this extract.
 */
1357 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1359 u32 rate = 0, ret = 0;
1361 if (!clk_dvfs_node || (min_rate > max_rate))
1364 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1365 mutex_lock(&clk_dvfs_node->vd->mutex);
1367 /* To reset clk_dvfs_node->min_rate/max_rate */
1368 dvfs_get_rate_range(clk_dvfs_node);
1369 clk_dvfs_node->freq_limit_en = 1;
/* Only accept requested bounds that fall inside the table's range. */
1371 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1372 clk_dvfs_node->min_rate = min_rate;
1375 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1376 clk_dvfs_node->max_rate = max_rate;
/* Re-apply the last requested rate (or current HW rate if none yet) so
 * the new limits are enforced right away. */
1379 if (clk_dvfs_node->last_set_rate == 0)
1380 rate = __clk_get_rate(clk_dvfs_node->clk);
1382 rate = clk_dvfs_node->last_set_rate;
1383 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1385 mutex_unlock(&clk_dvfs_node->vd->mutex);
1389 DVFS_DBG("%s:clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1390 __func__, __clk_get_name(clk_dvfs_node->clk),
1391 clk_dvfs_node->last_set_rate,
1392 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1396 EXPORT_SYMBOL(dvfs_clk_enable_limit);
/*
 * dvfs_clk_disable_limit() - drop the user frequency clamp, restore the
 * table-derived min/max range, and re-target the last requested rate.
 */
1398 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1405 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1406 mutex_lock(&clk_dvfs_node->vd->mutex);
1408 /* To reset clk_dvfs_node->min_rate/max_rate */
1409 dvfs_get_rate_range(clk_dvfs_node);
1410 clk_dvfs_node->freq_limit_en = 0;
1411 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1413 mutex_unlock(&clk_dvfs_node->vd->mutex);
1416 DVFS_DBG("%s: clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1417 __func__, __clk_get_name(clk_dvfs_node->clk),
1418 clk_dvfs_node->last_set_rate,
1419 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1423 EXPORT_SYMBOL(dvfs_clk_disable_limit);
/*
 * dvfs_disable_temp_limit() - turn off thermal throttling on every known
 * CPU/GPU dvfs node and stop the periodic limit worker.
 * NOTE(review): called without temp_limit_mutex here — presumably safe at
 * the call sites; confirm against callers.
 */
1425 void dvfs_disable_temp_limit(void) {
1426 if (clk_cpu_b_dvfs_node)
1427 clk_cpu_b_dvfs_node->temp_limit_enable = 0;
1428 if (clk_cpu_l_dvfs_node)
1429 clk_cpu_l_dvfs_node->temp_limit_enable = 0;
1430 if (clk_cpu_dvfs_node)
1431 clk_cpu_dvfs_node->temp_limit_enable = 0;
1432 if (clk_gpu_dvfs_node)
1433 clk_gpu_dvfs_node->temp_limit_enable = 0;
/* Synchronous cancel: the work re-queues itself, so wait for completion. */
1434 cancel_delayed_work_sync(&dvfs_temp_limit_work);
/*
 * dvfs_clk_get_limit() - read back the node's current [min, max] clamp.
 * Returns the freq_limit_en flag (non-zero when a limit is active).
 */
1437 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
1444 mutex_lock(&clk_dvfs_node->vd->mutex);
1446 *min_rate = clk_dvfs_node->min_rate;
1447 *max_rate = clk_dvfs_node->max_rate;
1448 freq_limit_en = clk_dvfs_node->freq_limit_en;
1450 mutex_unlock(&clk_dvfs_node->vd->mutex);
1452 return freq_limit_en;
1454 EXPORT_SYMBOL(dvfs_clk_get_limit);
/*
 * dvfs_clk_register_set_rate_callback() - install a custom set-rate hook
 * used by dvfs_target() instead of plain clk_set_rate().
 */
1456 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1461 mutex_lock(&clk_dvfs_node->vd->mutex);
1462 clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1463 mutex_unlock(&clk_dvfs_node->vd->mutex);
1467 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
/*
 * dvfs_get_freq_volt_table() - return the node's freq/volt table pointer.
 * Caller gets a live pointer, not a copy; the table may be replaced later
 * by dvfs_set_freq_volt_table().
 */
1469 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node)
1471 struct cpufreq_frequency_table *table;
1476 mutex_lock(&clk_dvfs_node->vd->mutex);
1477 table = clk_dvfs_node->dvfs_table;
1478 mutex_unlock(&clk_dvfs_node->vd->mutex);
1482 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
/*
 * dvfs_set_freq_volt_table() - replace the node's freq/volt table, then
 * recompute the rate range and round rates/voltages to supported values.
 */
1484 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1489 if (IS_ERR_OR_NULL(table)){
1490 DVFS_ERR("%s:invalid table!\n", __func__);
1494 mutex_lock(&clk_dvfs_node->vd->mutex);
1495 clk_dvfs_node->dvfs_table = table;
1496 dvfs_get_rate_range(clk_dvfs_node);
1497 dvfs_table_round_clk_rate(clk_dvfs_node);
1498 dvfs_table_round_volt(clk_dvfs_node);
1499 mutex_unlock(&clk_dvfs_node->vd->mutex);
1503 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
/*
 * get_adjust_volt_by_leakage() - compute a voltage delta for this chip's
 * measured leakage relative to the table's reference leakage.
 * Lower leakage than reference -> positive delta (raise volt); higher ->
 * negative delta. Returns 0 on visible early-out paths (no reference
 * leakage, or leakage read of 0/0xff treated as invalid).
 */
1505 static int get_adjust_volt_by_leakage(struct dvfs_node *dvfs_node)
1508 int delta_leakage = 0;
1510 int adjust_volt = 0;
1515 if (dvfs_node->lkg_info.def_table_lkg == -1)
1518 leakage = rockchip_get_leakage(dvfs_node->channel);
1519 if (!leakage || (leakage == 0xff))
1522 delta_leakage = leakage - dvfs_node->lkg_info.def_table_lkg;
1523 if (delta_leakage <= 0) {
/* Chip leaks less than the reference: find the first band whose lkg
 * threshold is below the measured leakage. */
1524 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1525 CPUFREQ_TABLE_END); i++) {
1526 if (leakage > dvfs_node->lkg_info.table[i].lkg) {
1528 dvfs_node->lkg_info.table[i].dlt_volt;
1533 } else if (delta_leakage > 0) {
/* Chip leaks more: negate the band's delta to lower the voltage. */
1534 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1535 CPUFREQ_TABLE_END); i++) {
1536 if (leakage <= dvfs_node->lkg_info.table[i].lkg) {
1538 -dvfs_node->lkg_info.table[i].dlt_volt;
/*
 * adjust_table_by_leakage() - add the leakage-derived voltage delta to
 * every table entry at or above min_adjust_freq. The .index field holds
 * the voltage in this driver's tables.
 */
1546 static void adjust_table_by_leakage(struct dvfs_node *dvfs_node)
1548 int i, adjust_volt = get_adjust_volt_by_leakage(dvfs_node);
1553 if (!dvfs_node->dvfs_table)
/* min_adjust_freq == -1 means "not configured"; skip adjustment. */
1556 if (dvfs_node->lkg_info.min_adjust_freq == -1)
1560 (dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1561 if (dvfs_node->dvfs_table[i].frequency >=
1562 dvfs_node->lkg_info.min_adjust_freq)
1563 dvfs_node->dvfs_table[i].index += adjust_volt;
/*
 * clk_enable_dvfs() - enable DVFS management for a node.
 * First enable: acquires the voltage domain regulator, rounds the tables,
 * applies leakage/PVTM adjustments, reads the current rate, looks up its
 * reference voltage and raises the domain voltage if needed; also applies
 * regulator-mode constraints when regu_mode_en is set. Subsequent calls
 * only bump enable_count.
 * NOTE(review): numbering gaps show error-return lines are missing from
 * this extract.
 */
1567 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1569 struct cpufreq_frequency_table clk_fv;
1577 DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n",
1578 __func__, __clk_get_name(clk_dvfs_node->clk));
1580 if (!clk_dvfs_node->vd) {
1581 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n",
1582 __func__, clk_dvfs_node->name);
1585 mutex_lock(&clk_dvfs_node->vd->mutex);
1586 if (clk_dvfs_node->enable_count == 0) {
/* Lazily acquire the domain regulator on first enable. */
1587 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1588 if (clk_dvfs_node->vd->regulator_name)
1589 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1590 if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1591 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1592 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1593 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1594 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1595 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
/* Regulator unavailable: abort enable and leave count at 0. */
1597 clk_dvfs_node->vd->regulator = NULL;
1598 clk_dvfs_node->enable_count = 0;
1599 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n",
1600 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1601 mutex_unlock(&clk_dvfs_node->vd->mutex);
1605 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1608 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1609 __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
/* Normalize tables: round rates to clk-supported values, compute range,
 * apply per-chip leakage and PVTM corrections, round voltages. */
1611 dvfs_table_round_clk_rate(clk_dvfs_node);
1612 dvfs_get_rate_range(clk_dvfs_node);
1613 clk_dvfs_node->freq_limit_en = 1;
1614 clk_dvfs_node->max_limit_freq = clk_dvfs_node->max_rate;
1615 if (clk_dvfs_node->lkg_adjust_volt_en)
1616 adjust_table_by_leakage(clk_dvfs_node);
1617 if (clk_dvfs_node->support_pvtm)
1618 pvtm_set_dvfs_table(clk_dvfs_node);
1619 dvfs_table_round_volt(clk_dvfs_node);
1620 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1621 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1623 DVFS_DBG("%s: %s get freq %u!\n",
1624 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
/* Look up the reference voltage for the current rate; tolerate rates
 * below the table's smallest entry. */
1626 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1627 if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1628 DVFS_ERR("%s: table empty\n", __func__);
1629 clk_dvfs_node->enable_count = 0;
1630 mutex_unlock(&clk_dvfs_node->vd->mutex);
1633 DVFS_WARNING("%s: clk(%s) freq table all value are smaller than default(%d), use default, just enable dvfs\n",
1634 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1635 clk_dvfs_node->enable_count++;
1636 mutex_unlock(&clk_dvfs_node->vd->mutex);
1640 clk_dvfs_node->enable_count++;
1641 clk_dvfs_node->set_volt = clk_fv.index;
1642 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1643 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1644 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1646 if (clk_dvfs_node->dvfs_nb) {
1647 // notifier must be unregistered again when the clk is disabled
1648 clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
/* Bring the domain up to the required voltage before declaring success. */
1651 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1652 ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1653 dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1655 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1656 clk_dvfs_node->enable_count = 0;
1657 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
1658 mutex_unlock(&clk_dvfs_node->vd->mutex);
1661 clk_dvfs_node->vd->cur_volt = volt_new;
1662 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1666 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1667 __func__, clk_dvfs_node->enable_count);
1668 clk_dvfs_node->enable_count++;
/* Optional regulator-mode handling (constrain table, set initial mode). */
1671 if (clk_dvfs_node->regu_mode_en) {
1672 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1674 DVFS_ERR("%s: clk(%s) regu_mode_table is unvalid, set regu_mode_en=0!\n",
1675 __func__, clk_dvfs_node->name);
1676 clk_dvfs_node->regu_mode_en = 0;
1677 mutex_unlock(&clk_dvfs_node->vd->mutex);
1681 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1683 DVFS_ERR("%s: clk(%s) rate %dKhz get regu_mode fail\n",
1684 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1685 mutex_unlock(&clk_dvfs_node->vd->mutex);
1688 clk_dvfs_node->regu_mode = mode;
1690 dvfs_update_clk_pds_mode(clk_dvfs_node);
1693 mutex_unlock(&clk_dvfs_node->vd->mutex);
1697 EXPORT_SYMBOL(clk_enable_dvfs);
/*
 * clk_disable_dvfs() - drop one enable reference; on the last reference,
 * recompute and apply the domain voltage without this node's request and
 * unregister the node's clk notifier.
 */
1699 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1706 DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n",
1707 __func__, __clk_get_name(clk_dvfs_node->clk));
1709 mutex_lock(&clk_dvfs_node->vd->mutex);
1710 if (!clk_dvfs_node->enable_count) {
1711 DVFS_WARNING("%s:clk(%s) is already closed!\n",
1712 __func__, __clk_get_name(clk_dvfs_node->clk));
1713 mutex_unlock(&clk_dvfs_node->vd->mutex);
1716 clk_dvfs_node->enable_count--;
1717 if (0 == clk_dvfs_node->enable_count) {
1718 DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1719 __func__, __clk_get_name(clk_dvfs_node->clk));
/* Re-evaluate the domain voltage now that this node no longer votes. */
1720 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1721 dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1724 clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1725 DVFS_DBG("clk unregister nb!\n");
1729 mutex_unlock(&clk_dvfs_node->vd->mutex);
1732 EXPORT_SYMBOL(clk_disable_dvfs);
/*
 * dvfs_get_limit_rate() - clamp a requested rate to the node's limits:
 * user min/max clamp (when freq_limit_en), then the thermal throttle
 * ceiling, then the absolute max_limit_freq.
 * NOTE(review): lines are missing between the clamps in this extract.
 */
1734 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1736 unsigned long limit_rate;
1739 if (clk_dvfs_node->freq_limit_en) {
1741 if (rate < clk_dvfs_node->min_rate) {
1742 limit_rate = clk_dvfs_node->min_rate;
1743 } else if (rate > clk_dvfs_node->max_rate) {
1744 limit_rate = clk_dvfs_node->max_rate;
/* Thermal throttling ceiling takes precedence over the request. */
1746 if (clk_dvfs_node->temp_limit_enable) {
1747 if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1748 limit_rate = clk_dvfs_node->temp_limit_rate;
1751 if (limit_rate > clk_dvfs_node->max_limit_freq)
1752 limit_rate = clk_dvfs_node->max_limit_freq;
1755 DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
/*
 * dvfs_target() - the voltage-domain set-rate implementation installed as
 * vd->vd_dvfs_target. Sequencing is the core DVFS invariant visible here:
 * raise voltage BEFORE increasing the rate, lower voltage AFTER decreasing
 * it, so the rail always supports the running frequency. Called with the
 * vd mutex held (see dvfs_clk_set_rate()).
 */
1760 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1762 struct cpufreq_frequency_table clk_fv;
1763 unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1764 struct clk *clk = clk_dvfs_node->clk;
1770 if (!clk_dvfs_node->enable_count) {
1771 DVFS_ERR("%s: %s is disable, set rate error\n",
1772 __func__, clk_dvfs_node->name);
/* Recover the rail first if the previous voltage transition failed. */
1776 if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1777 /* It means the last time set voltage error */
1778 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1784 rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1785 new_rate = __clk_round_rate(clk, rate);
1786 old_rate = __clk_get_rate(clk);
1787 if (new_rate == old_rate)
1790 DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1791 __func__, clk_dvfs_node->name, rate, old_rate);
1793 /* find the clk corresponding voltage */
1794 ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1796 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
1797 __func__, clk_dvfs_node->name, new_rate);
/* Stash the old voltage so the fail path can restore the node's vote. */
1800 clk_volt_store = clk_dvfs_node->set_volt;
1801 clk_dvfs_node->set_volt = clk_fv.index;
1802 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1803 DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1804 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1807 /* if up the rate */
1808 if (new_rate > old_rate) {
1809 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1811 DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1812 __func__, clk_dvfs_node->name, new_rate);
/* Voltage up first; abort the rate change if the rail won't follow. */
1814 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1816 goto fail_roll_back;
/* Custom set-rate hook wins over the generic clk framework call. */
1820 if (clk_dvfs_node->clk_dvfs_target) {
1821 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1823 ret = clk_set_rate(clk, rate);
1827 DVFS_ERR("%s:clk(%s) set rate err\n",
1828 __func__, __clk_get_name(clk));
1829 goto fail_roll_back;
1831 clk_dvfs_node->set_freq = new_rate / 1000;
1833 DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
1834 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1836 /* if down the rate */
1837 if (new_rate < old_rate) {
1838 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1842 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1844 DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1845 __func__, clk_dvfs_node->name, new_rate);
/* fail_roll_back: restore this node's previous voltage vote. */
1850 clk_dvfs_node->set_volt = clk_volt_store;
/* Thin wrapper: round a rate against the node's underlying clk. */
1855 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1857 return __clk_round_rate(clk_dvfs_node->clk, rate);
1859 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
/* Thin wrapper: current hardware rate of the node's underlying clk. */
1861 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1863 return __clk_get_rate(clk_dvfs_node->clk);
1865 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
/*
 * dvfs_clk_get_last_set_rate() - last rate requested via
 * dvfs_clk_set_rate(), read under the vd mutex (may differ from the
 * hardware rate when limits clamped the request).
 */
1867 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1869 unsigned long last_set_rate;
1871 mutex_lock(&clk_dvfs_node->vd->mutex);
1872 last_set_rate = clk_dvfs_node->last_set_rate;
1873 mutex_unlock(&clk_dvfs_node->vd->mutex);
1875 return last_set_rate;
1877 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
/* Thin wrapper: enable the node's underlying clk. */
1880 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1882 return clk_enable(clk_dvfs_node->clk);
1884 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1886 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1888 return clk_disable(clk_dvfs_node->clk);
1890 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
/*
 * clk_get_dvfs_node() - find a dvfs node by clock name, walking the
 * vd -> pd -> clk tree under the appropriate locks. Returns the node, or
 * (presumably) NULL when not found — the final return line is missing
 * from this extract.
 */
1892 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1896 struct dvfs_node *clk_dvfs_node;
1898 mutex_lock(&rk_dvfs_mutex);
1899 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1900 mutex_lock(&vd->mutex);
1901 list_for_each_entry(pd, &vd->pd_list, node) {
1902 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1903 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
/* Drop both locks before returning the match. */
1904 mutex_unlock(&vd->mutex);
1905 mutex_unlock(&rk_dvfs_mutex);
1906 return clk_dvfs_node;
1910 mutex_unlock(&vd->mutex);
1912 mutex_unlock(&rk_dvfs_mutex);
1916 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
/* Counterpart of clk_get_dvfs_node(); intentionally a no-op (nodes are
 * never freed), kept for API symmetry. */
1918 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1922 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
/* Thin wrapper: prepare and enable the node's underlying clk. */
1924 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1926 return clk_prepare_enable(clk_dvfs_node->clk);
1928 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
/* Thin wrapper: disable and unprepare the node's underlying clk. */
1931 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1933 clk_disable_unprepare(clk_dvfs_node->clk);
1935 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
/*
 * dvfs_clk_set_rate() - public entry to set a node's rate through DVFS.
 * Takes the vd mutex, delegates to vd_dvfs_target (dvfs_target()), and
 * records the requested rate as last_set_rate — note the request is
 * recorded even if the target call failed.
 */
1937 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1944 DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n",
1945 __func__, clk_dvfs_node->name, rate);
1947 #if 0 // judge by reference func in rk
1948 if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1949 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1954 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1955 mutex_lock(&clk_dvfs_node->vd->mutex);
1956 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1957 clk_dvfs_node->last_set_rate = rate;
1958 mutex_unlock(&clk_dvfs_node->vd->mutex);
1960 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
1961 __func__, clk_dvfs_node->name);
1966 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
/*
 * rk_regist_vd() - register a voltage domain into the global dvfs tree.
 */
1969 int rk_regist_vd(struct vd_node *vd)
1975 vd->volt_time_flag=0;
1977 INIT_LIST_HEAD(&vd->pd_list);
1978 mutex_lock(&rk_dvfs_mutex);
1979 list_add(&vd->node, &rk_dvfs_tree);
1980 mutex_unlock(&rk_dvfs_mutex);
1984 EXPORT_SYMBOL_GPL(rk_regist_vd);
/*
 * rk_regist_pd() - register a power domain under its parent voltage
 * domain (vd obtained from lines missing in this extract).
 */
1986 int rk_regist_pd(struct pd_node *pd)
1997 INIT_LIST_HEAD(&pd->clk_list);
1998 mutex_lock(&vd->mutex);
1999 list_add(&pd->node, &vd->pd_list);
2000 mutex_unlock(&vd->mutex);
2004 EXPORT_SYMBOL_GPL(rk_regist_pd);
/*
 * rk_regist_clk() - attach a dvfs clk node to its power domain's clk
 * list, serialized by the owning voltage domain's mutex.
 */
2006 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
2014 vd = clk_dvfs_node->vd;
2015 pd = clk_dvfs_node->pd;
2019 mutex_lock(&vd->mutex);
2020 list_add(&clk_dvfs_node->node, &pd->clk_list);
2021 mutex_unlock(&vd->mutex);
2025 EXPORT_SYMBOL_GPL(rk_regist_clk);
/*
 * of_get_temp_limit_table() - parse a DT property of (temp, kHz) u32
 * pairs into a kzalloc'd cpufreq table. In the result, .index holds the
 * temperature and .frequency the rate (scaled *1000). Caller owns the
 * allocation. Returns NULL on the (not fully visible) error paths.
 */
2027 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
2029 struct cpufreq_frequency_table *temp_limt_table = NULL;
2030 const struct property *prop;
2034 prop = of_find_property(dev_node, propname, NULL);
2040 nr = prop->length / sizeof(u32);
2042 pr_err("%s: Invalid freq list\n", __func__);
/* One extra slot for the CPUFREQ_TABLE_END sentinel. */
2046 temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
2047 (nr/2 + 1), GFP_KERNEL);
2051 for (i=0; i<nr/2; i++){
2052 temp_limt_table[i].index = be32_to_cpup(val++);
2053 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
2056 temp_limt_table[i].index = 0;
2057 temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
2059 return temp_limt_table;
/*
 * of_get_dvfs_table() - parse the "operating-points" DT property of
 * (freq, volt) u32 pairs into a kzalloc'd, sentinel-terminated cpufreq
 * table (.frequency = rate, .index = voltage). Caller owns *dvfs_table.
 */
2063 static int of_get_dvfs_table(struct device_node *dev_node,
2064 struct cpufreq_frequency_table **dvfs_table)
2066 struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
2067 const struct property *prop;
2071 prop = of_find_property(dev_node, "operating-points", NULL);
2077 nr = prop->length / sizeof(u32);
2079 pr_err("%s: Invalid freq list\n", __func__);
2083 tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
2084 (nr/2 + 1), GFP_KERNEL);
2087 for (i = 0; i < nr/2; i++) {
2088 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
2089 tmp_dvfs_table[i].index = be32_to_cpup(val++);
2092 tmp_dvfs_table[i].index = 0;
2093 tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
2095 *dvfs_table = tmp_dvfs_table;
/*
 * of_get_dvfs_pvtm_table() - parse "pvtm-operating-points", a list of
 * (freq, volt, pvtm) u32 triples, into two parallel sentinel-terminated
 * tables: the dvfs table (freq/volt) and the pvtm table (freq/pvtm).
 * Caller owns both allocations.
 */
2101 static int of_get_dvfs_pvtm_table(struct device_node *dev_node,
2102 struct cpufreq_frequency_table **dvfs_table,
2103 struct cpufreq_frequency_table **pvtm_table)
2105 struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
2106 struct cpufreq_frequency_table *tmp_pvtm_table = NULL;
2107 const struct property *prop;
2111 prop = of_find_property(dev_node, "pvtm-operating-points", NULL);
2117 nr = prop->length / sizeof(u32);
2119 pr_err("%s: Invalid freq list\n", __func__);
2123 tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
2124 (nr/3 + 1), GFP_KERNEL);
2126 tmp_pvtm_table = kzalloc(sizeof(*tmp_pvtm_table) *
2127 (nr/3 + 1), GFP_KERNEL);
2131 for (i = 0; i < nr/3; i++) {
2132 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
2133 tmp_dvfs_table[i].index = be32_to_cpup(val++);
2135 tmp_pvtm_table[i].frequency = tmp_dvfs_table[i].frequency;
2136 tmp_pvtm_table[i].index = be32_to_cpup(val++);
/* Terminate both tables with the cpufreq sentinel. */
2139 tmp_dvfs_table[i].index = 0;
2140 tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
2142 tmp_pvtm_table[i].index = 0;
2143 tmp_pvtm_table[i].frequency = CPUFREQ_TABLE_END;
2145 *dvfs_table = tmp_dvfs_table;
2146 *pvtm_table = tmp_pvtm_table;
/*
 * of_get_lkg_adjust_volt_table() - parse a DT property of (leakage,
 * delta-volt) s32 pairs into a kzalloc'd table terminated by
 * dlt_volt == CPUFREQ_TABLE_END. Caller owns the allocation.
 */
2151 static struct lkg_adjust_volt_table
2152 *of_get_lkg_adjust_volt_table(struct device_node *np,
2153 const char *propname)
2155 struct lkg_adjust_volt_table *lkg_adjust_volt_table = NULL;
2156 const struct property *prop;
2160 prop = of_find_property(np, propname, NULL);
2166 nr = prop->length / sizeof(s32);
2168 pr_err("%s: Invalid freq list\n", __func__);
2172 lkg_adjust_volt_table =
2173 kzalloc(sizeof(struct lkg_adjust_volt_table) *
2174 (nr/2 + 1), GFP_KERNEL);
2178 for (i = 0; i < nr/2; i++) {
2179 lkg_adjust_volt_table[i].lkg = be32_to_cpup(val++);
2180 lkg_adjust_volt_table[i].dlt_volt = be32_to_cpup(val++);
2183 lkg_adjust_volt_table[i].lkg = 0;
2184 lkg_adjust_volt_table[i].dlt_volt = CPUFREQ_TABLE_END;
2186 return lkg_adjust_volt_table;
/*
 * dvfs_node_parse_dt() - populate a dvfs_node from its device-tree node:
 * thermal channel, regulator-mode table, temperature-limit tables
 * (normal / performance / virtual per-cpu-busy), cluster, PVTM config
 * and table, the operating-points table, and leakage-adjust settings.
 * NOTE(review): several properties are read without checking the return
 * value, relying on the pre-set defaults when absent.
 */
2189 static int dvfs_node_parse_dt(struct device_node *np,
2190 struct dvfs_node *dvfs_node)
2192 int process_version = rockchip_process_version();
2196 of_property_read_u32_index(np, "channel", 0, &dvfs_node->channel);
2198 pr_info("channel:%d, lkg:%d\n",
2199 dvfs_node->channel, rockchip_get_leakage(dvfs_node->channel));
2201 of_property_read_u32_index(np, "regu-mode-en", 0,
2202 &dvfs_node->regu_mode_en);
2203 if (dvfs_node->regu_mode_en)
2204 dvfs_node->regu_mode_table = of_get_regu_mode_table(np);
2206 dvfs_node->regu_mode_table = NULL;
2208 of_property_read_u32_index(np, "temp-limit-enable", 0,
2209 &dvfs_node->temp_limit_enable);
2210 if (dvfs_node->temp_limit_enable) {
2211 of_property_read_u32_index(np, "min_temp_limit",
2212 0, &dvfs_node->min_temp_limit);
/* DT value is in MHz-style units; internal rates are *1000. */
2213 dvfs_node->min_temp_limit *= 1000;
2214 of_property_read_u32_index(np, "target-temp",
2215 0, &dvfs_node->target_temp);
2216 pr_info("target-temp:%d\n", dvfs_node->target_temp);
2217 dvfs_node->nor_temp_limit_table =
2218 of_get_temp_limit_table(np,
2219 "normal-temp-limit");
2220 dvfs_node->per_temp_limit_table =
2221 of_get_temp_limit_table(np,
2222 "performance-temp-limit");
/* Virtual tables indexed by number of busy CPUs (1..4). */
2223 dvfs_node->virt_temp_limit_table[0] =
2224 of_get_temp_limit_table(np,
2225 "virt-temp-limit-1-cpu-busy");
2226 dvfs_node->virt_temp_limit_table[1] =
2227 of_get_temp_limit_table(np,
2228 "virt-temp-limit-2-cpu-busy");
2229 dvfs_node->virt_temp_limit_table[2] =
2230 of_get_temp_limit_table(np,
2231 "virt-temp-limit-3-cpu-busy");
2232 dvfs_node->virt_temp_limit_table[3] =
2233 of_get_temp_limit_table(np,
2234 "virt-temp-limit-4-cpu-busy");
2236 dvfs_node->temp_limit_rate = -1;
2238 dvfs_node->cluster = 0;
2239 of_property_read_u32_index(np, "cluster", 0, &dvfs_node->cluster);
2241 dvfs_node->pvtm_min_temp = 0;
2242 of_property_read_u32_index(np, "pvtm_min_temp", 0,
2243 &dvfs_node->pvtm_min_temp);
2245 ret = of_property_read_u32_index(np, "support-pvtm", 0,
2246 &dvfs_node->support_pvtm);
2248 if (of_get_dvfs_pvtm_table(np, &dvfs_node->dvfs_table,
2249 &dvfs_node->pvtm_table))
/* Match this node to a compiled-in pvtm_info by channel, silicon
 * process version, cluster and machine compatible. */
2252 for (i = 0; i < ARRAY_SIZE(pvtm_info_table); i++) {
2253 struct pvtm_info *pvtm_info = pvtm_info_table[i];
2255 if ((pvtm_info->channel == dvfs_node->channel) &&
2256 (pvtm_info->process_version == process_version) &&
2257 (pvtm_info->cluster == dvfs_node->cluster) &&
2258 of_machine_is_compatible(pvtm_info->compatible)) {
2259 dvfs_node->pvtm_info = pvtm_info;
2264 if (!dvfs_node->pvtm_info)
2265 dvfs_node->support_pvtm = 0;
2267 if (of_get_dvfs_table(np, &dvfs_node->dvfs_table))
2271 of_property_read_u32_index(np, "lkg_adjust_volt_en", 0,
2272 &dvfs_node->lkg_adjust_volt_en);
2273 if (dvfs_node->lkg_adjust_volt_en) {
2274 dvfs_node->lkg_info.def_table_lkg = -1;
2275 of_property_read_u32_index(np, "def_table_lkg", 0,
2276 &dvfs_node->lkg_info.def_table_lkg);
2278 dvfs_node->lkg_info.min_adjust_freq = -1;
2279 of_property_read_u32_index(np, "min_adjust_freq", 0,
2280 &dvfs_node->lkg_info.min_adjust_freq
2283 dvfs_node->lkg_info.table =
2284 of_get_lkg_adjust_volt_table(np,
2285 "lkg_adjust_volt_table");
/*
 * of_dvfs_init() - build the vd -> pd -> clk dvfs tree from the "dvfs"
 * device-tree node: one vd_node per child, one pd_node per grandchild,
 * one dvfs_node (with parsed tables and a clk handle) per great-grandchild.
 * Runs at arch_initcall_sync time.
 */
2291 int of_dvfs_init(void)
2295 struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
2296 struct dvfs_node *dvfs_node;
2300 DVFS_DBG("%s\n", __func__);
2301 pr_info("process version: %d\n", rockchip_process_version());
2303 dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
2304 if (IS_ERR_OR_NULL(dvfs_dev_node)) {
2305 DVFS_ERR("%s get dvfs dev node err\n", __func__);
2306 return PTR_ERR(dvfs_dev_node);
2309 for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
2310 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
2314 mutex_init(&vd->mutex);
2315 vd->name = vd_dev_node->name;
2316 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
2318 DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n",
2319 __func__, vd_dev_node->name, ret);
2324 vd->suspend_volt = 0;
/* Flag starts as FAILURE so the first dvfs_target() resets the rail. */
2326 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
2327 vd->vd_dvfs_target = dvfs_target;
2328 ret = rk_regist_vd(vd);
2330 DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
2335 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n",
2336 __func__, vd->name, vd->regulator_name, vd->suspend_volt);
2338 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {
2339 pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
2344 pd->name = pd_dev_node->name;
2346 ret = rk_regist_pd(pd);
2348 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
2352 DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
2353 __func__, pd->name, vd->name);
2354 for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
2355 if (!of_device_is_available(clk_dev_node))
2358 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
/* DT node name doubles as the clk lookup name below. */
2362 dvfs_node->name = clk_dev_node->name;
2366 if (dvfs_node_parse_dt(clk_dev_node, dvfs_node))
2369 clk = clk_get(NULL, clk_dev_node->name);
2371 DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
2377 dvfs_node->clk = clk;
2378 ret = rk_regist_clk(dvfs_node);
2380 DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
2384 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n",
2385 __func__, clk_dev_node->name, pd->name);
2394 arch_initcall_sync(of_dvfs_init);
2397 /*********************************************************************************/
2399 * dump_dbg_map() : Dump all DVFS state information for debugging
/*
 * dump_dbg_map() - print the whole dvfs tree (voltage domains, power
 * domains, clocks, their tables and limits) to the kernel log for
 * debugging. The buf parameter is the sysfs output buffer; the visible
 * body prints via printk — presumably the (missing) return value is the
 * byte count written to buf. Backs the dvfs_tree sysfs file.
 */
2401 static int dump_dbg_map(char *buf)
2406 struct dvfs_node *clk_dvfs_node;
2409 mutex_lock(&rk_dvfs_mutex);
2410 printk( "-------------DVFS TREE-----------\n\n\n");
2411 printk( "DVFS TREE:\n");
2413 list_for_each_entry(vd, &rk_dvfs_tree, node) {
2414 mutex_lock(&vd->mutex);
2415 printk( "|\n|- voltage domain:%s\n", vd->name);
2416 printk( "|- current voltage:%d\n", vd->cur_volt);
2417 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
2419 list_for_each_entry(pd, &vd->pd_list, node) {
2420 printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
2421 pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
2422 dvfs_regu_mode_to_string(pd->regu_mode));
2424 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
2425 printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
2426 " enable_dvfs = %s\n",
2427 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
2428 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
2429 printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
2430 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
2431 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
2432 clk_dvfs_node->last_set_rate/1000);
2433 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2434 printk( "| | | |- freq = %d, volt = %d\n",
2435 clk_dvfs_node->dvfs_table[i].frequency,
2436 clk_dvfs_node->dvfs_table[i].index);
2439 printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
2440 " regu_mode_en = %d\n",
2441 clk_dvfs_node->name, clk_dvfs_node->set_freq,
2442 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
2443 clk_dvfs_node->regu_mode_en);
2444 if (clk_dvfs_node->regu_mode_table) {
2445 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2446 printk( "| | | |- freq = %d, regu_mode = %s\n",
2447 clk_dvfs_node->regu_mode_table[i].frequency/1000,
2448 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
2453 mutex_unlock(&vd->mutex);
2456 printk( "-------------DVFS TREE END------------\n");
2457 mutex_unlock(&rk_dvfs_mutex);
2462 /*********************************************************************************/
/* Sysfs kobject for /sys/dvfs, created in dvfs_init(). */
2463 static struct kobject *dvfs_kobj;
/* Attribute wrapper pairing show/store handlers for the dvfs sysfs files. */
2464 struct dvfs_attribute {
2465 struct attribute attr;
2466 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
2468 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
2469 const char *buf, size_t n);
/* Store handler for the dvfs_tree sysfs file; accepts and ignores input. */
2472 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
2473 const char *buf, size_t n)
/* Show handler for the dvfs_tree sysfs file: dump the whole dvfs tree. */
2477 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
2480 return dump_dbg_map(buf);
/*
 * cpu_temp_target_store() - sysfs write: set the same throttle target
 * temperature on every present CPU dvfs node.
 */
2483 static ssize_t cpu_temp_target_store(struct kobject *kobj,
2484 struct kobj_attribute *attr,
2485 const char *buf, size_t n)
2489 mutex_lock(&temp_limit_mutex);
2490 if (clk_cpu_b_dvfs_node) {
2491 ret = kstrtouint(buf, 0, &clk_cpu_b_dvfs_node->target_temp);
2495 if (clk_cpu_l_dvfs_node) {
2496 ret = kstrtouint(buf, 0, &clk_cpu_l_dvfs_node->target_temp);
2500 if (clk_cpu_dvfs_node) {
2501 ret = kstrtouint(buf, 0, &clk_cpu_dvfs_node->target_temp);
2506 mutex_unlock(&temp_limit_mutex);
/*
 * cpu_temp_target_show() - sysfs read: report the throttle target
 * temperature of each present CPU dvfs node, one per line.
 */
2509 static ssize_t cpu_temp_target_show(struct kobject *kobj,
2510 struct kobj_attribute *attr, char *buf)
2514 if (clk_cpu_b_dvfs_node)
2515 ret += sprintf(buf + ret, "cpu_b:%d\n",
2516 clk_cpu_b_dvfs_node->target_temp);
2517 if (clk_cpu_l_dvfs_node)
2518 ret += sprintf(buf + ret, "cpu_l:%d\n",
2519 clk_cpu_l_dvfs_node->target_temp);
2520 if (clk_cpu_dvfs_node)
2521 ret += sprintf(buf + ret, "cpu:%d\n",
2522 clk_cpu_dvfs_node->target_temp);
/*
 * cpu_temp_enable_store() - sysfs write: toggle thermal limiting on every
 * present CPU dvfs node; the throttle ceiling is reset to max_rate (on a
 * path partially missing from this extract).
 */
2527 static ssize_t cpu_temp_enable_store(struct kobject *kobj,
2528 struct kobj_attribute *attr,
2529 const char *buf, size_t n)
2533 mutex_lock(&temp_limit_mutex);
2534 if (clk_cpu_b_dvfs_node) {
2535 ret = kstrtouint(buf, 0,
2536 &clk_cpu_b_dvfs_node->temp_limit_enable);
2539 clk_cpu_b_dvfs_node->temp_limit_rate =
2540 clk_cpu_b_dvfs_node->max_rate;
2542 if (clk_cpu_l_dvfs_node) {
2543 ret = kstrtouint(buf, 0,
2544 &clk_cpu_l_dvfs_node->temp_limit_enable);
2547 clk_cpu_l_dvfs_node->temp_limit_rate =
2548 clk_cpu_l_dvfs_node->max_rate;
2550 if (clk_cpu_dvfs_node) {
2551 ret = kstrtouint(buf, 0, &clk_cpu_dvfs_node->temp_limit_enable);
2554 clk_cpu_dvfs_node->temp_limit_rate =
2555 clk_cpu_dvfs_node->max_rate;
2558 mutex_unlock(&temp_limit_mutex);
/*
 * cpu_temp_enable_show() - sysfs read: report the thermal-limit enable
 * flag of each present CPU dvfs node, one per line.
 */
2561 static ssize_t cpu_temp_enable_show(struct kobject *kobj,
2562 struct kobj_attribute *attr, char *buf)
2566 if (clk_cpu_b_dvfs_node)
2567 ret += sprintf(buf + ret, "cpu_b:%d\n",
2568 clk_cpu_b_dvfs_node->temp_limit_enable);
2569 if (clk_cpu_l_dvfs_node)
2570 ret += sprintf(buf + ret, "cpu_l:%d\n",
2571 clk_cpu_l_dvfs_node->temp_limit_enable);
2572 if (clk_cpu_dvfs_node)
2573 ret += sprintf(buf + ret, "cpu:%d\n",
2574 clk_cpu_dvfs_node->temp_limit_enable);
/* Sysfs files created under /sys/dvfs by dvfs_init(). */
2579 static struct dvfs_attribute dvfs_attrs[] = {
2580 /* node_name permision show_func store_func */
2581 //#ifdef CONFIG_RK_CLOCK_PROC
2582 __ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR,
2583 dvfs_tree_show, dvfs_tree_store),
2584 __ATTR(cpu_temp_target, S_IRUSR | S_IRGRP | S_IWUSR,
2585 cpu_temp_target_show, cpu_temp_target_store),
2586 __ATTR(cpu_temp_enable, S_IRUSR | S_IRGRP | S_IWUSR,
2587 cpu_temp_enable_show, cpu_temp_enable_store),
/*
 * dvfs_init() - late_initcall setup: create the /sys/dvfs files, resolve
 * the well-known CPU/GPU dvfs nodes, start the thermal-limit workqueue
 * when any node has limiting enabled, and hook up the GPU power-domain /
 * framebuffer / reboot notifiers when the vdd_gpu regulator exists.
 */
2591 static int __init dvfs_init(void)
2595 dvfs_kobj = kobject_create_and_add("dvfs", NULL);
2598 for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
2599 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
2601 DVFS_ERR("create index %d error\n", i);
/* clk_cpu_bl points at whichever big/little node exists, for thermal
 * voltage readback in dvfs_get_temp(). */
2606 clk_cpu_b_dvfs_node = clk_get_dvfs_node("clk_core_b");
2607 if (clk_cpu_b_dvfs_node) {
2608 clk_cpu_b_dvfs_node->temp_limit_rate =
2609 clk_cpu_b_dvfs_node->max_rate;
2610 if (clk_cpu_bl_dvfs_node == NULL)
2611 clk_cpu_bl_dvfs_node = clk_cpu_b_dvfs_node;
2614 clk_cpu_l_dvfs_node = clk_get_dvfs_node("clk_core_l");
2615 if (clk_cpu_l_dvfs_node) {
2616 clk_cpu_l_dvfs_node->temp_limit_rate =
2617 clk_cpu_l_dvfs_node->max_rate;
2618 if (clk_cpu_bl_dvfs_node == NULL)
2619 clk_cpu_bl_dvfs_node = clk_cpu_l_dvfs_node;
2622 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
2623 if (clk_cpu_dvfs_node)
2624 clk_cpu_dvfs_node->temp_limit_rate =
2625 clk_cpu_dvfs_node->max_rate;
2627 clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
2628 if (clk_gpu_dvfs_node)
2629 clk_gpu_dvfs_node->temp_limit_rate =
2630 clk_gpu_dvfs_node->max_rate;
/* Only spin up the (single-threaded, high-prio) workqueue if someone
 * actually wants thermal limiting. */
2632 if ((clk_cpu_b_dvfs_node && clk_cpu_b_dvfs_node->temp_limit_enable) ||
2633 (clk_cpu_l_dvfs_node && clk_cpu_l_dvfs_node->temp_limit_enable) ||
2634 (clk_gpu_dvfs_node && clk_gpu_dvfs_node->temp_limit_enable) ||
2635 (clk_cpu_dvfs_node && clk_cpu_dvfs_node->temp_limit_enable)) {
2636 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT |
2637 WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
2638 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
2641 vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
2642 if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
2643 struct clk *clk = clk_get(NULL, "pd_gpu");
2646 rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
2648 fb_register_client(&early_suspend_notifier);
2649 register_reboot_notifier(&vdd_gpu_reboot_notifier);
2655 late_initcall(dvfs_init);