1 /* arch/arm/mach-rk30/rk30_dvfs.c
3 * Copyright (C) 2012 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
/* Provided by the Rockchip tsadc driver: read the temperature of channel chn. */
32 extern int rockchip_tsadc_get_temp(int chn);
34 #define MHz (1000 * 1000)
/* List of all voltage-domain (vd) nodes; walked under rk_dvfs_mutex. */
35 static LIST_HEAD(rk_dvfs_tree);
36 static DEFINE_MUTEX(rk_dvfs_mutex);
/* Workqueue hosting the periodic temperature-limit work item. */
37 static struct workqueue_struct *dvfs_wq;
/* Cached dvfs node for the CPU clock, used by the thermal-throttle paths. */
38 static struct dvfs_node *clk_cpu_dvfs_node;
/* Throttle target temperature in degrees Celsius. */
39 static unsigned int target_temp = 80;
40 static int temp_limit_enable;
42 static int pd_gpu_off, early_suspend;
/* Serializes vdd_gpu enable/disable across reboot, clk-pd and fb notifiers. */
43 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
44 struct regulator *vdd_gpu_regulator;
/*
 * Reboot notifier: force vdd_gpu on before the system reboots so the GPU
 * rail is in a known state across the restart.
 * NOTE(review): this excerpt is missing interior lines (braces, locals,
 * return) dropped by extraction; comments only, code untouched.
 */
46 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
47 unsigned long event, void *ptr)
51 DVFS_DBG("%s: enable vdd_gpu\n", __func__);
52 mutex_lock(&switch_vdd_gpu_mutex);
/* Enable only if currently off, keeping the regulator enable count balanced. */
53 if (!regulator_is_enabled(vdd_gpu_regulator))
54 ret = regulator_enable(vdd_gpu_regulator);
55 mutex_unlock(&switch_vdd_gpu_mutex);
60 static struct notifier_block vdd_gpu_reboot_notifier = {
61 .notifier_call = vdd_gpu_reboot_notifier_event,
/*
 * GPU power-domain clock notifier: power the vdd_gpu rail up before the GPU
 * power domain prepares, and drop it again after the domain unprepares.
 * Both transitions are guarded by switch_vdd_gpu_mutex so they cannot race
 * the reboot/fb notifiers that touch the same regulator.
 * NOTE(review): switch cases below are missing their break/return lines
 * (extraction artifact); code left byte-identical.
 */
64 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
65 unsigned long event, void *ptr)
70 case RK_CLK_PD_PREPARE:
71 mutex_lock(&switch_vdd_gpu_mutex);
74 if (!regulator_is_enabled(vdd_gpu_regulator))
75 ret = regulator_enable(vdd_gpu_regulator);
77 mutex_unlock(&switch_vdd_gpu_mutex);
79 case RK_CLK_PD_UNPREPARE:
80 mutex_lock(&switch_vdd_gpu_mutex);
/* Disable only if currently on; mirrors the enable path above. */
83 if (regulator_is_enabled(vdd_gpu_regulator))
84 ret = regulator_disable(vdd_gpu_regulator);
86 mutex_unlock(&switch_vdd_gpu_mutex);
95 static struct notifier_block clk_pd_gpu_notifier = {
96 .notifier_call = clk_pd_gpu_notifier_call,
/*
 * Framebuffer blank notifier: treat screen unblank as "resume" (turn the GPU
 * rail on early, at FB_EARLY_EVENT_BLANK) and screen powerdown as "suspend"
 * (turn the rail off after the blank completes, at FB_EVENT_BLANK).
 * NOTE(review): several interior lines (switch braces, break statements,
 * return) are missing from this excerpt; code left byte-identical.
 */
100 static int early_suspend_notifier_call(struct notifier_block *self,
101 unsigned long action, void *data)
103 struct fb_event *event = data;
/* fb notifier payload: first int of event->data is the blank mode. */
104 int blank_mode = *((int *)event->data);
107 mutex_lock(&switch_vdd_gpu_mutex);
108 if (action == FB_EARLY_EVENT_BLANK) {
109 switch (blank_mode) {
110 case FB_BLANK_UNBLANK:
113 if (!regulator_is_enabled(vdd_gpu_regulator))
114 ret = regulator_enable(
121 } else if (action == FB_EVENT_BLANK) {
122 switch (blank_mode) {
123 case FB_BLANK_POWERDOWN:
126 if (regulator_is_enabled(vdd_gpu_regulator))
127 ret = regulator_disable(
136 mutex_unlock(&switch_vdd_gpu_mutex);
141 static struct notifier_block early_suspend_notifier = {
142 .notifier_call = early_suspend_notifier_call,
/*
 * DVFS-local regulator mode encoding. The ascending numeric order (standby <
 * idle < normal < fast) appears deliberate: max() is used elsewhere (see
 * dvfs_pd_get_newmode_byclk) to pick the most performant requested mode.
 */
145 #define DVFS_REGULATOR_MODE_STANDBY 1
146 #define DVFS_REGULATOR_MODE_IDLE 2
147 #define DVFS_REGULATOR_MODE_NORMAL 3
148 #define DVFS_REGULATOR_MODE_FAST 4
/* Human-readable name for a DVFS-local mode (for logging).
 * NOTE(review): the return-string lines are missing from this excerpt. */
150 static const char* dvfs_regu_mode_to_string(unsigned int mode)
153 case DVFS_REGULATOR_MODE_FAST:
155 case DVFS_REGULATOR_MODE_NORMAL:
157 case DVFS_REGULATOR_MODE_IDLE:
159 case DVFS_REGULATOR_MODE_STANDBY:
/*
 * Map a DVFS-local mode value to the kernel's REGULATOR_MODE_* constant.
 * NOTE(review): the default case / error return of this switch is missing
 * from the excerpt; callers (dvfs_regu_set_mode) treat a negative return
 * as a conversion error.
 */
166 static int dvfs_regu_mode_convert(unsigned int mode)
169 case DVFS_REGULATOR_MODE_FAST:
170 return REGULATOR_MODE_FAST;
171 case DVFS_REGULATOR_MODE_NORMAL:
172 return REGULATOR_MODE_NORMAL;
173 case DVFS_REGULATOR_MODE_IDLE:
174 return REGULATOR_MODE_IDLE;
175 case DVFS_REGULATOR_MODE_STANDBY:
176 return REGULATOR_MODE_STANDBY;
/*
 * Inverse of dvfs_regu_mode_convert(): map a kernel REGULATOR_MODE_* value
 * back to the DVFS-local encoding. Used to round a requested mode to one
 * the regulator actually supports (see dvfs_regu_mode_table_constrain).
 */
182 static int dvfs_regu_mode_deconvert(unsigned int mode)
185 case REGULATOR_MODE_FAST:
186 return DVFS_REGULATOR_MODE_FAST;
187 case REGULATOR_MODE_NORMAL:
188 return DVFS_REGULATOR_MODE_NORMAL;
189 case REGULATOR_MODE_IDLE:
190 return DVFS_REGULATOR_MODE_IDLE;
191 case REGULATOR_MODE_STANDBY:
192 return DVFS_REGULATOR_MODE_STANDBY;
/*
 * Parse the "regu-mode-table" device-tree property into a cpufreq-style
 * table. The property is a flat list of u32 pairs <freq_kHz mode>; each
 * pair becomes one entry (.frequency = kHz * 1000, .index = mode). The
 * last pair must have frequency 0 (sentinel check below); the allocated
 * table is terminated with CPUFREQ_TABLE_END.
 * Returns the table, or an ERR_PTR on allocation failure.
 * NOTE(review): error-return lines for the property/length checks are
 * missing from this excerpt.
 */
198 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
200 struct cpufreq_frequency_table *regu_mode_table = NULL;
201 const struct property *prop;
205 prop = of_find_property(dev_node, "regu-mode-table", NULL);
/* nr counts u32 cells; entries come in pairs, hence nr/2 below. */
211 nr = prop->length / sizeof(u32);
213 pr_err("%s: Invalid freq list\n", __func__);
/* +1 slot for the CPUFREQ_TABLE_END terminator. */
217 regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
218 (nr/2+1), GFP_KERNEL);
219 if (!regu_mode_table) {
220 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
221 return ERR_PTR(-ENOMEM);
226 for (i=0; i<nr/2; i++){
227 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
228 regu_mode_table[i].index = be32_to_cpup(val++);
/* DT contract: the final pair's frequency must be 0. */
231 if (regu_mode_table[i-1].frequency != 0) {
232 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
233 kfree(regu_mode_table);
237 regu_mode_table[i].index = 0;
238 regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
240 return regu_mode_table;
/*
 * Validate a node's regu_mode_table against the real regulator:
 *  - if the regulator does not support a requested mode, the whole table
 *    is discarded (freed and set to NULL) — mode control is then disabled;
 *  - otherwise each entry is rounded to the closest valid mode via
 *    convert/deconvert, updating .index in place.
 * NOTE(review): early-return lines after the guard checks are missing
 * from this excerpt.
 */
243 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
246 int mode, convert_mode, valid_mode;
251 if (!clk_dvfs_node->regu_mode_table)
254 if (!clk_dvfs_node->vd)
257 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
260 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
261 mode = clk_dvfs_node->regu_mode_table[i].index;
262 convert_mode = dvfs_regu_mode_convert(mode);
264 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
267 DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
/* Unsupported mode: drop the table entirely rather than run half-valid. */
269 kfree(clk_dvfs_node->regu_mode_table);
270 clk_dvfs_node->regu_mode_table = NULL;
274 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
275 if (valid_mode != mode) {
276 DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
277 __func__, mode, valid_mode);
278 clk_dvfs_node->regu_mode_table[i].index = valid_mode;
/*
 * Look up the regulator mode for a given clock rate in the node's
 * regu_mode_table: the first entry whose frequency is <= rate wins, which
 * presumably implies the table is sorted descending by frequency — TODO
 * confirm against of_get_regu_mode_table ordering. Result via *mode.
 * NOTE(review): the break/return lines of the loop are missing here.
 */
286 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
287 unsigned long rate, unsigned int *mode)
292 if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
295 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
296 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
297 *mode = clk_dvfs_node->regu_mode_table[i].index;
/*
 * Mode aggregation helpers. A power domain's mode is the max of the modes
 * requested by its enabled clocks; a voltage domain's mode is the max over
 * its power domains. The DVFS-local mode values are ordered so that max()
 * selects the most performant mode.
 */
/* Fast path: if this clock already requests >= the pd's current mode, keep
 * its mode; otherwise rescan every enabled clock in the pd for the max. */
305 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
307 unsigned int mode_max = 0;
310 if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
311 return clk_dvfs_node->regu_mode;
314 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
315 if (clk_dvfs_node->regu_mode_en)
316 mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
/* Refresh the mode of the pd that owns this clock. */
322 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
329 pd = clk_dvfs_node->pd;
333 pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
/* Max mode over all pds in the voltage domain. */
336 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
338 unsigned int mode_max_vd = 0;
344 list_for_each_entry(pd, &vd->pd_list, node) {
345 mode_max_vd = max(mode_max_vd, pd->regu_mode);
/* Propagate one clock's mode change up: clk -> pd -> vd. */
351 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
356 dvfs_update_clk_pds_mode(clk_dvfs_node);
358 return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
/*
 * Apply a DVFS-local mode to the voltage domain's regulator: convert to a
 * kernel REGULATOR_MODE_* value, call dvfs_regulator_set_mode(), and cache
 * the new mode in vd->regu_mode on success.
 * NOTE(review): error-return lines after the failure logs are missing
 * from this excerpt.
 */
361 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
367 if (IS_ERR_OR_NULL(vd)) {
368 DVFS_ERR("%s: vd_node error\n", __func__);
372 DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
374 convert_mode = dvfs_regu_mode_convert(mode);
375 if (convert_mode < 0) {
376 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
380 if (!IS_ERR_OR_NULL(vd->regulator)) {
381 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
383 DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
384 vd->regulator_name, mode, vd->regu_mode);
388 DVFS_ERR("%s: invalid regulator\n", __func__);
/* Only reached on success: remember the mode actually in effect. */
392 vd->regu_mode = mode;
/*
 * Entry point for rate-driven mode changes: look up the mode the new rate
 * requires, record it on the clock node, recompute the aggregated vd mode
 * (clk -> pd -> vd) and program the regulator. No-op when the node has
 * regu_mode_en clear.
 */
397 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
406 if (!clk_dvfs_node->regu_mode_en)
409 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
411 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
412 __func__, clk_dvfs_node->name, rate);
415 clk_dvfs_node->regu_mode = mode;
417 mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
421 ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
/*
 * Busy-wait for the regulator to ramp when *raising* voltage (returns
 * immediately when new_volt <= old_volt). Prefers the ramp time reported
 * by regulator_set_voltage_time(); when the regulator cannot report one,
 * falls back to a heuristic delay proportional to the voltage delta
 * ((new - old) >> 9 microseconds).
 */
426 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
430 if(new_volt <= old_volt)
432 if(vd->volt_time_flag > 0)
433 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
436 if(u_time < 0) {// regulator is not suported time,useing default time
437 DVFS_DBG("%s:vd %s is not suported getting delay time,so we use default\n",
439 u_time = ((new_volt) - (old_volt)) >> 9;
442 DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n",
443 __func__, vd->name, old_volt, new_volt, u_time);
/* Long ramps: split into mdelay + udelay and warn, since >1ms stalls here. */
445 if (u_time >= 1000) {
446 mdelay(u_time / 1000);
447 udelay(u_time % 1000);
448 DVFS_WARNING("%s: regulator set vol delay is larger 1ms,old is %d,new is %d\n",
449 __func__, old_volt, new_volt);
/*
 * Set the regulator voltage and, when the set call reports failure, read
 * the voltage back to distinguish "set failed but the new voltage is in
 * effect anyway" from a genuine failure. Note both bounds passed to the
 * set call are max_uV; the min_uV parameter is unused in this excerpt.
 * NOTE(review): the surrounding if/else and return lines are missing.
 */
455 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
457 int ret = 0, read_back = 0;
459 ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
461 DVFS_ERR("%s: now read back to check voltage\n", __func__);
463 /* read back to judge if it is already effect */
465 read_back = dvfs_regulator_get_voltage(regulator);
466 if (read_back == max_uV) {
467 DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
470 DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
/*
 * Program the voltage domain's regulator directly to volt_new (uV), wait
 * out the up-ramp via dvfs_volt_up_delay(), and track the result in
 * vd->volt_set_flag / vd->cur_volt. On failure the flag is left at
 * DVFS_SET_VOLT_FAILURE so later calls can resync via dvfs_reset_volt().
 * NOTE(review): error-return lines are missing from this excerpt.
 */
477 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
481 DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
483 if (IS_ERR_OR_NULL(vd_clk)) {
484 DVFS_ERR("%s: vd_node error\n", __func__);
488 if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
489 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
490 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
492 vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
493 DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
494 __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
499 DVFS_ERR("%s: invalid regulator\n", __func__);
/* Success path: remember the voltage now in effect. */
503 vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
504 vd_clk->cur_volt = volt_new;
/*
 * Recover after a failed voltage set: read the regulator's actual output
 * and adopt it as vd->cur_volt, clearing the failure flag. Returns the
 * resynced voltage; the error path (read <= 0) stops further scaling.
 * NOTE(review): error-return lines after the logs are missing from this
 * excerpt.
 */
510 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
512 int flag_set_volt_correct = 0;
513 if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
514 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
516 DVFS_ERR("%s: invalid regulator\n", __func__);
519 if (flag_set_volt_correct <= 0) {
520 DVFS_ERR("%s (vd:%s), try to reload volt ,by it is error again(%d)!!! stop scaling\n",
521 __func__, dvfs_vd->name, flag_set_volt_correct);
524 dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
525 DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
526 __func__, dvfs_vd->name, flag_set_volt_correct);
528 /* Reset vd's voltage */
529 dvfs_vd->cur_volt = flag_set_volt_correct;
531 return dvfs_vd->cur_volt;
535 // for clk enable case to get vd regulator info
/*
 * Seed vd->cur_volt from the live regulator when a clock first enables
 * DVFS; a non-positive reading marks the domain as failed so the scaling
 * paths know the cached voltage is untrustworthy.
 */
536 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
538 vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
539 if(vd->cur_volt <= 0){
540 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
542 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/*
 * Cache the regulator's selectable voltages into vd->volt_list, capped at
 * VD_VOL_LIST_CNT entries. Non-positive list entries are skipped (the
 * commented-out warning below), so n counts only valid voltages.
 * NOTE(review): the line storing the final count (presumably into
 * vd->n_voltages, which vd_regulator_round_volt_* iterate) is missing
 * from this excerpt — TODO confirm.
 */
545 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
547 unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
548 int n = 0, sel_volt = 0;
550 if(selector > VD_VOL_LIST_CNT)
551 selector = VD_VOL_LIST_CNT;
553 for (i = 0; i < selector; i++) {
554 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
556 //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
557 // __func__, vd->name, i, sel_volt);
560 vd->volt_list[n++] = sel_volt;
561 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n",
562 __func__, vd->name, i, n, sel_volt);
/*
 * Round a requested voltage to one the regulator can actually produce,
 * scanning the cached vd->volt_list.
 * NOTE(review): the comparison/return lines inside both loops are missing
 * from this excerpt; _min appears to return the previous list entry
 * (volt_list[i-1]) once the scan passes the request — TODO confirm.
 */
/* Round up: smallest supported voltage >= volt (VD_LIST_RELATION_H). */
569 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
574 for (i = 0; i < vd->n_voltages; i++) {
575 sel_volt = vd->volt_list[i];
577 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
/* Round down: largest supported voltage <= volt (VD_LIST_RELATION_L). */
588 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
593 for (i = 0; i < vd->n_voltages; i++) {
594 sel_volt = vd->volt_list[i];
596 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
602 return vd->volt_list[i-1];
/* Dispatcher: flags selects round-down (L) or round-up (default). */
612 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
616 if(flags == VD_LIST_RELATION_L)
617 return vd_regulator_round_volt_min(vd, volt);
619 return vd_regulator_round_volt_max(vd, volt);
/*
 * Walk the node's freq/volt table and replace each voltage (stored in the
 * .index field, in uV) with the nearest supported regulator voltage,
 * rounded up (VD_LIST_RELATION_H) so the table never under-volts.
 * Entries that cannot be rounded (test_volt <= 0) are only warned about.
 */
622 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
626 if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd ||
627 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
630 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
632 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
635 DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
636 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
639 DVFS_DBG("clk %s:round_volt %d to %d\n",
640 clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
642 clk_dvfs_node->dvfs_table[i].index=test_volt;
/*
 * Probe whether the regulator can report voltage ramp time: ask for the
 * time to rise 200mV from the current voltage and cache the answer in
 * vd->volt_time_flag (negative = unsupported, so dvfs_volt_up_delay()
 * falls back to its heuristic delay).
 */
646 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
648 if(vd->volt_time_flag <= 0){// check regulator support get uping vol timer
649 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
650 if(vd->volt_time_flag < 0){
651 DVFS_DBG("%s,vd %s volt_time is no support\n",
655 DVFS_DBG("%s,vd %s volt_time is support,up 200mv need delay %d us\n",
656 __func__, vd->name, vd->volt_time_flag);
/*
 * Probe whether the regulator supports mode get/set: read the current
 * mode, and if it is one of the four known REGULATOR_MODE_* values, try
 * writing it back. A failed write clears vd->mode_flag to 0 so the probe
 * is retried later; a positive mode_flag means mode control works.
 */
661 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
663 //REGULATOR_MODE_FAST
664 if(vd->mode_flag <= 0){// check regulator support get uping vol timer{
665 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
666 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
667 || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
/* Round-trip check: setting the mode we just read must succeed. */
669 if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
670 vd->mode_flag = 0;// check again
673 if(vd->mode_flag > 0){
674 DVFS_DBG("%s,vd %s mode(now is %d) support\n",
675 __func__, vd->name, vd->mode_flag);
678 DVFS_DBG("%s,vd %s mode is not support now check\n",
/*
 * Public lookup: find the regulator backing the voltage domain whose
 * regulator_name matches, walking rk_dvfs_tree under rk_dvfs_mutex.
 * NOTE(review): the not-found return (presumably NULL) is missing from
 * this excerpt.
 */
685 struct regulator *dvfs_get_regulator(char *regulator_name)
689 mutex_lock(&rk_dvfs_mutex);
690 list_for_each_entry(vd, &rk_dvfs_tree, node) {
691 if (strcmp(regulator_name, vd->regulator_name) == 0) {
692 mutex_unlock(&rk_dvfs_mutex);
693 return vd->regulator;
696 mutex_unlock(&rk_dvfs_mutex);
/*
 * Derive clk_dvfs_node->{min,max}_rate from its freq/volt table. Table
 * frequencies are in kHz; the /1000*1000*1000 expression converts to Hz
 * while stripping any flag digits encoded in the low three decimal digits
 * (see dvfs_table_round_clk_rate). max_rate ends up as the last entry;
 * min_rate is presumably set from the first entry by the missing i==0
 * branch (line 714 dropped by extraction) — TODO confirm.
 */
700 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
702 struct cpufreq_frequency_table *table;
708 clk_dvfs_node->min_rate = 0;
709 clk_dvfs_node->max_rate = 0;
711 table = clk_dvfs_node->dvfs_table;
712 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
713 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
715 clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
718 DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
719 __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * Round every table frequency to a rate the clock hardware can produce.
 * Table entries encode flags in the low three decimal digits of the kHz
 * value (frequency = real_kHz * 1000 + flags? — the %1000 / /1000 split
 * below strips and re-attaches them). Each real rate is passed through
 * __clk_round_rate(), rounded *up* to a whole MHz, converted back to the
 * encoded form and written in place.
 */
724 static void dvfs_table_round_clk_rate(struct dvfs_node *clk_dvfs_node)
726 int i, rate, temp_rate, flags;
728 if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
731 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
732 //ddr rate = real rate+flags
733 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
734 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
735 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
737 DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
738 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
742 /* Set rate unit as MHZ */
743 if (temp_rate % MHz != 0)
744 temp_rate = (temp_rate / MHz + 1) * MHz;
/* Back to kHz-encoded form, re-attaching the flag digits. */
746 temp_rate = (temp_rate / 1000) + flags;
748 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
749 clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
751 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;
/*
 * Find the table entry for a requested rate: first entry whose frequency
 * (kHz) is >= rate_khz, copied into *clk_fv (frequency + voltage in
 * .index). If the rate exceeds the whole table, clk_fv->frequency is
 * cleared to 0 — callers treat that as out-of-bounds.
 * NOTE(review): return statements are missing from this excerpt.
 */
755 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
756 struct cpufreq_frequency_table *clk_fv)
760 if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
764 clk_fv->frequency = rate_khz;
767 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
768 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
769 clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
770 clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
771 //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
772 //clk_fv->frequency, clk_fv->index);
776 clk_fv->frequency = 0;
778 //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
/*
 * Voltage aggregation helpers — structurally identical to the mode
 * aggregation set above: a pd's voltage is the max set_volt of its
 * enabled clocks; a vd's voltage is the max over its pds.
 */
/* Fast path: an enabled clock already asking >= pd->cur_volt wins outright;
 * otherwise rescan all enabled clocks in the pd for the max. */
782 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
786 if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
787 return clk_dvfs_node->set_volt;
790 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
791 if (clk_dvfs_node->enable_count)
792 volt_max = max(volt_max, clk_dvfs_node->set_volt);
/* Refresh the owning pd's cur_volt from its clocks. */
797 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
804 pd = clk_dvfs_node->pd;
808 pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
/* Max voltage over all pds in the voltage domain. */
811 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
819 list_for_each_entry(pd, &vd->pd_list, node) {
820 volt_max_vd = max(volt_max_vd, pd->cur_volt);
/* Propagate one clock's voltage request up: clk -> pd -> vd. */
826 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
831 dvfs_update_clk_pds_volt(clk_dvfs_node);
832 return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
/*
 * Periodic (every 100ms, self-rearming) thermal worker: for every clock
 * node that has a temp_limit_table, refresh its temperature reading and
 * re-run the vd dvfs target at the last requested rate so the limit is
 * re-applied. Locking order: rk_dvfs_mutex, then each vd->mutex.
 * NOTE(review): a second dvfs_temp_limit_work_func definition appears
 * later in this file — in the full source the two are almost certainly
 * separated by #ifdef lines that this excerpt dropped; confirm before
 * editing either.
 */
836 static void dvfs_temp_limit_work_func(struct work_struct *work)
838 unsigned long delay = HZ / 10; // 100ms
841 struct dvfs_node *clk_dvfs_node;
/* Re-arm first so a slow pass below cannot stall the sampling cadence. */
843 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
845 mutex_lock(&rk_dvfs_mutex);
846 list_for_each_entry(vd, &rk_dvfs_tree, node) {
847 mutex_lock(&vd->mutex);
848 list_for_each_entry(pd, &vd->pd_list, node) {
849 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
850 if (clk_dvfs_node->temp_limit_table) {
851 clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
852 clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
856 mutex_unlock(&vd->mutex);
858 mutex_unlock(&rk_dvfs_mutex);
/*
 * PVTM (Process-Voltage-Temperature Monitor) calibration data for RK3288,
 * one table + info struct per silicon process version (V0/V1/V2).
 * In each table .frequency is the ARM rate in kHz and .index is the
 * minimum PVTM reading required to run that rate safely; the info struct
 * carries the scan parameters and the per-volt / per-degree PVTM deltas
 * used by pvtm_set_single_dvfs() to compute margins.
 */
862 static struct cpufreq_frequency_table rk3288v0_arm_pvtm_table[] = {
863 {.frequency = 216000, .index = 4006},
864 {.frequency = 408000, .index = 6518},
865 {.frequency = 600000, .index = 8345},
866 {.frequency = 816000, .index = 11026},
867 {.frequency = 1008000, .index = 12906},
868 {.frequency = 1200000, .index = 15532},
869 {.frequency = 1416000, .index = 18076},
870 {.frequency = 1608000, .index = 21282},
871 {.frequency = CPUFREQ_TABLE_END, .index = 1},
874 static struct pvtm_info rk3288v0_arm_pvtm_info = {
875 .compatible = "rockchip,rk3288",
876 .pvtm_table = rk3288v0_arm_pvtm_table,
877 .channel = ARM_DVFS_CH,
878 .process_version = RK3288_PROCESS_V0,
879 .scan_rate_hz = 216000000,
880 .sample_time_us = 1000,
881 .volt_step_uv = 12500,
882 .delta_pvtm_by_volt = 400,
883 .delta_pvtm_by_temp = 14,
884 .volt_margin_uv = 25000,
885 .min_volt_uv = 850000,
886 .max_volt_uv = 1400000,
889 static struct cpufreq_frequency_table rk3288v1_arm_pvtm_table[] = {
890 {.frequency = 216000, .index = 4710},
891 {.frequency = 408000, .index = 7200},
892 {.frequency = 600000, .index = 9192},
893 {.frequency = 816000, .index = 12560},
894 {.frequency = 1008000, .index = 14741},
895 {.frequency = 1200000, .index = 16886},
896 {.frequency = 1416000, .index = 20081},
897 {.frequency = 1608000, .index = 24061},
898 {.frequency = CPUFREQ_TABLE_END, .index = 1},
901 static struct pvtm_info rk3288v1_arm_pvtm_info = {
902 .compatible = "rockchip,rk3288",
903 .pvtm_table = rk3288v1_arm_pvtm_table,
904 .channel = ARM_DVFS_CH,
905 .process_version = RK3288_PROCESS_V1,
906 .scan_rate_hz = 216000000,
907 .sample_time_us = 1000,
908 .volt_step_uv = 12500,
909 .delta_pvtm_by_volt = 450,
910 .delta_pvtm_by_temp = 7,
911 .volt_margin_uv = 25000,
912 .min_volt_uv = 850000,
913 .max_volt_uv = 1400000,
916 static struct cpufreq_frequency_table rk3288v2_arm_pvtm_table[] = {
917 {.frequency = 216000, .index = 5369},
918 {.frequency = 408000, .index = 6984},
919 {.frequency = 600000, .index = 8771},
920 {.frequency = 816000, .index = 11434},
921 {.frequency = 1008000, .index = 14178},
922 {.frequency = 1200000, .index = 16797},
923 {.frequency = 1416000, .index = 20178},
924 {.frequency = 1608000, .index = 23303},
925 {.frequency = CPUFREQ_TABLE_END, .index = 1},
928 static struct pvtm_info rk3288v2_arm_pvtm_info = {
929 .compatible = "rockchip,rk3288",
930 .pvtm_table = rk3288v2_arm_pvtm_table,
931 .channel = ARM_DVFS_CH,
932 .process_version = RK3288_PROCESS_V2,
933 .scan_rate_hz = 216000000,
934 .sample_time_us = 1000,
935 .volt_step_uv = 12500,
936 .delta_pvtm_by_volt = 430,
937 .delta_pvtm_by_temp = 12,
938 .volt_margin_uv = 25000,
939 .min_volt_uv = 900000,
940 .max_volt_uv = 1400000,
/* Lookup array scanned (presumably by compatible + process_version) when
 * picking the calibration set for the running chip — TODO confirm caller. */
943 static struct pvtm_info *pvtm_info_table[] = {
944 &rk3288v0_arm_pvtm_info,
945 &rk3288v1_arm_pvtm_info,
946 &rk3288v2_arm_pvtm_info
/*
 * Calibrate one dvfs-table entry from measured PVTM data:
 *  1. convert the configured voltage margin (plus the per-entry margin in
 *     pvtm_table[idx].index? — the addition below suggests so, TODO
 *     confirm) into an equivalent PVTM margin via delta_pvtm_by_volt;
 *  2. add a temperature-dependent term (delta_pvtm_by_temp * current
 *     temperature from tsadc channel 1) to min_pvtm to get target_pvtm;
 *  3. pick the first (lowest) cached voltage whose measured pvtm_list
 *     value reaches the target and store it in dvfs_table[idx].index.
 * NOTE(review): rounding-up line for the n division and the not-found
 * return are missing from this excerpt.
 */
949 static int pvtm_set_single_dvfs(struct dvfs_node *dvfs_node, u32 idx,
950 struct pvtm_info *info, int *pvtm_list,
953 struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
954 struct cpufreq_frequency_table *pvtm_table = dvfs_node->pvtm_table;
955 int target_pvtm, pvtm_margin, volt_margin;
956 unsigned int n_voltages = dvfs_node->vd->n_voltages;
957 int *volt_list = dvfs_node->vd->volt_list;
960 volt_margin = info->volt_margin_uv + pvtm_table[idx].index;
961 n = volt_margin/info->volt_step_uv;
962 if (volt_margin%info->volt_step_uv)
965 pvtm_margin = n*info->delta_pvtm_by_volt;
966 temp = rockchip_tsadc_get_temp(1);
967 target_pvtm = min_pvtm+temp * info->delta_pvtm_by_temp + pvtm_margin;
969 DVFS_DBG("=====%s: temp:%d, freq:%d, target pvtm:%d=====\n",
970 __func__, temp, dvfs_table[idx].frequency, target_pvtm);
/* pvtm_list is indexed parallel to volt_list (ascending voltages). */
972 for (n = 0; n < n_voltages; n++) {
973 if (pvtm_list[n] >= target_pvtm) {
974 dvfs_table[idx].index = volt_list[n];
975 DVFS_DBG("freq[%d]=%d, volt=%d\n",
976 idx, dvfs_table[idx].frequency, volt_list[n]);
/*
 * Rebuild a node's freq/volt table from live PVTM measurements:
 *  1. run the clock at the calibration scan rate;
 *  2. for each supported voltage inside [min_volt_uv, max_volt_uv], set
 *     the regulator and sample the PVTM counter into pvtm_list[];
 *  3. for each dvfs-table frequency, find the matching pvtm_table entry
 *     (first with frequency >= target) and delegate the voltage choice to
 *     pvtm_set_single_dvfs(); warn when the target PVTM is unreachable or
 *     the frequency exceeds the calibration table.
 * NOTE(review): the original clk rate/voltage save-restore lines and some
 * loop punctuation are missing from this excerpt.
 */
987 static void pvtm_set_dvfs_table(struct dvfs_node *dvfs_node)
989 struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
990 struct pvtm_info *info = dvfs_node->pvtm_info;
991 struct regulator *regulator = dvfs_node->vd->regulator;
994 int pvtm_list[VD_VOL_LIST_CNT] = {0};
995 unsigned int n_voltages = dvfs_node->vd->n_voltages;
996 int *volt_list = dvfs_node->vd->volt_list;
1001 clk_set_rate(dvfs_node->clk, info->scan_rate_hz);
1002 DVFS_DBG("%s:%lu\n", __func__, clk_get_rate(dvfs_node->clk));
1004 for (i = 0; i < n_voltages; i++) {
1005 if ((volt_list[i] >= info->min_volt_uv) &&
1006 (volt_list[i] <= info->max_volt_uv)) {
1007 regulator_set_voltage(regulator, volt_list[i],
1009 pvtm_list[i] = pvtm_get_value(info->channel,
1010 info->sample_time_us);
1014 for (i = 0; dvfs_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1015 for (j = 0; info->pvtm_table[j].frequency !=
1016 CPUFREQ_TABLE_END; j++)
1017 if (info->pvtm_table[j].frequency >=
1018 dvfs_table[i].frequency) {
1019 int min_pvtm = info->pvtm_table[j].index;
1021 ret = pvtm_set_single_dvfs(dvfs_node,
1030 DVFS_WARNING("freq: %d can not reach target pvtm\n",
1031 dvfs_table[i].frequency);
/* Inner loop ran off the calibration table: frequency not covered. */
1035 if (info->pvtm_table[j].frequency == CPUFREQ_TABLE_END) {
1036 DVFS_WARNING("not support freq :%d, max freq is %d\n",
1037 dvfs_table[i].frequency,
1038 info->pvtm_table[j-1].frequency);
/*
 * Fallback thermal limiter for chips without a usable tsadc (rk312x only;
 * callers reach here when rockchip_tsadc_get_temp returns INVALID_TEMP).
 * Instead of real temperature, it estimates how many CPUs are busy from
 * per-cpu idle-time deltas (with a +6.25% slack term), picks the matching
 * virt_temp_limit_table, and applies the highest frequency whose .index
 * (a temperature threshold) is <= target_temp. In PERFORMANCE status the
 * limit is lifted (the in_perf branch).
 * NOTE(review): the busy_cpus assignments after each threshold comparison
 * are missing from this excerpt.
 */
1044 static void dvfs_virt_temp_limit_work_func(void)
1046 const struct cpufreq_frequency_table *limits_table = NULL;
1047 unsigned int new_temp_limit_rate = -1;
1048 unsigned int nr_cpus = num_online_cpus();
1049 static bool in_perf;
1052 if (!cpu_is_rk312x())
1055 if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
1057 } else if (in_perf) {
1060 static u64 last_time_in_idle;
1061 static u64 last_time_in_idle_timestamp;
1062 u64 time_in_idle = 0, now;
1065 unsigned cpu, busy_cpus;
1067 for_each_online_cpu(cpu) {
1068 time_in_idle += get_cpu_idle_time_us(cpu, &now);
1070 delta_time = now - last_time_in_idle_timestamp;
1071 delta_idle = time_in_idle - last_time_in_idle;
1072 last_time_in_idle = time_in_idle;
1073 last_time_in_idle_timestamp = now;
1074 delta_idle += delta_time >> 4; /* +6.25% */
1075 if (delta_idle > (nr_cpus - 1)
1076 * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
1078 else if (delta_idle > (nr_cpus - 2) * delta_time)
1080 else if (delta_idle > (nr_cpus - 3) * delta_time)
/* Table is selected per estimated busy-cpu count (1-based index). */
1085 limits_table = clk_cpu_dvfs_node->virt_temp_limit_table[busy_cpus-1];
1086 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
1087 delta_time, delta_idle, nr_cpus, busy_cpus);
1091 new_temp_limit_rate = limits_table[0].frequency;
1092 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1093 if (target_temp >= limits_table[i].index)
1094 new_temp_limit_rate = limits_table[i].frequency;
/* Only touch the clock when the limit actually changed. */
1098 if (clk_cpu_dvfs_node->temp_limit_rate != new_temp_limit_rate) {
1099 clk_cpu_dvfs_node->temp_limit_rate = new_temp_limit_rate;
1100 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1101 DVFS_DBG("temp_limit_rate:%d\n", (int)clk_cpu_dvfs_node->temp_limit_rate);
/*
 * Policy-aware CPU thermal limiter (second definition of this function in
 * the excerpt — in the full source the two versions are almost certainly
 * alternatives under #ifdef lines that extraction dropped; confirm before
 * editing). Re-arms itself every 100ms, then:
 *  - tsadc invalid -> delegate to the virtual (idle-time based) limiter;
 *  - ignore jitter of <= 1 degree vs. the last sample;
 *  - PERFORMANCE policy: absolute lookup in per_temp_limit_table (highest
 *    frequency whose threshold temp is exceeded);
 *  - NORMAL policy: proportional control around target_temp — step the
 *    limit down by a nor_temp_limit_table rate step while overshooting
 *    and rising, step it back up (clamped to max_rate) while below target.
 */
1105 static void dvfs_temp_limit_work_func(struct work_struct *work)
1107 int temp=0, delta_temp=0;
1108 unsigned long delay = HZ/10;
1109 unsigned long arm_rate_step=0;
1110 static int old_temp=0;
1113 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
1115 temp = rockchip_tsadc_get_temp(1);
1117 if (temp == INVALID_TEMP)
1118 return dvfs_virt_temp_limit_work_func();
/* Debounce: skip samples within 1 degree of the previous one. */
1121 delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
1122 if (delta_temp <= 1)
1125 if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
1126 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
1130 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1131 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1132 if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
1133 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
1136 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1137 } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
1138 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
/* Over target and still rising: throttle down one table step. */
1142 if (temp > target_temp) {
1143 if (temp > old_temp) {
1144 delta_temp = temp - target_temp;
1145 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1146 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
1147 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
1150 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
1151 clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
1152 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
/* Below target with headroom: release the limit one step at a time. */
1156 if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
1157 delta_temp = target_temp - temp;
1158 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1159 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
1160 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
1164 if (arm_rate_step) {
1165 clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
1166 if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
1167 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1169 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1175 DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
/* Work item shared by both limiter variants; queued on dvfs_wq, CPU 0. */
1179 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
/*
 * Public API: constrain a clock to [min_rate, max_rate]. Resets the
 * node's range from its table first, then narrows it with the arguments
 * (each bound applied only if it falls inside the table range), marks
 * freq_limit_en, and immediately re-targets the clock at its last set
 * rate (or the live rate when none was recorded) so the new limit takes
 * effect. All under the vd mutex.
 */
1181 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1183 u32 rate = 0, ret = 0;
1185 if (!clk_dvfs_node || (min_rate > max_rate))
1188 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1189 mutex_lock(&clk_dvfs_node->vd->mutex);
1191 /* To reset clk_dvfs_node->min_rate/max_rate */
1192 dvfs_get_rate_range(clk_dvfs_node);
1193 clk_dvfs_node->freq_limit_en = 1;
1195 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1196 clk_dvfs_node->min_rate = min_rate;
1199 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1200 clk_dvfs_node->max_rate = max_rate;
1203 if (clk_dvfs_node->last_set_rate == 0)
1204 rate = __clk_get_rate(clk_dvfs_node->clk);
1206 rate = clk_dvfs_node->last_set_rate;
1207 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1209 mutex_unlock(&clk_dvfs_node->vd->mutex);
1213 DVFS_DBG("%s:clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1214 __func__, __clk_get_name(clk_dvfs_node->clk),
1215 clk_dvfs_node->last_set_rate,
1216 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * Public API: undo dvfs_clk_enable_limit — restore the full table-derived
 * [min, max] range, clear freq_limit_en, and re-target the clock at its
 * last requested rate so any previously clamped rate can be restored.
 */
1222 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1229 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1230 mutex_lock(&clk_dvfs_node->vd->mutex);
1232 /* To reset clk_dvfs_node->min_rate/max_rate */
1233 dvfs_get_rate_range(clk_dvfs_node);
1234 clk_dvfs_node->freq_limit_en = 0;
1235 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1237 mutex_unlock(&clk_dvfs_node->vd->mutex);
1240 DVFS_DBG("%s: clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1241 __func__, __clk_get_name(clk_dvfs_node->clk),
1242 clk_dvfs_node->last_set_rate,
1243 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1249 void dvfs_disable_temp_limit(void) {
1250 temp_limit_enable = 0;
1251 cancel_delayed_work_sync(&dvfs_temp_limit_work);
/*
 * Public API: read back a node's current [min, max] rate limits under the
 * vd mutex. Returns the freq_limit_en flag (non-zero when a limit set by
 * dvfs_clk_enable_limit is active).
 */
1254 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
1261 mutex_lock(&clk_dvfs_node->vd->mutex);
1263 *min_rate = clk_dvfs_node->min_rate;
1264 *max_rate = clk_dvfs_node->max_rate;
1265 freq_limit_en = clk_dvfs_node->freq_limit_en;
1267 mutex_unlock(&clk_dvfs_node->vd->mutex);
1269 return freq_limit_en;
1271 EXPORT_SYMBOL(dvfs_clk_get_limit);
/*
 * Public API: install a driver-specific set-rate callback on the node
 * (stored under the vd mutex; invoked by the dvfs target path instead of
 * the generic clk_set_rate).
 */
1273 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1278 mutex_lock(&clk_dvfs_node->vd->mutex);
1279 clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1280 mutex_unlock(&clk_dvfs_node->vd->mutex);
1284 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
/*
 * Public API: return the node's freq/volt table pointer (read under the
 * vd mutex). Caller must not free it; ownership stays with the node.
 */
1286 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node)
1288 struct cpufreq_frequency_table *table;
1293 mutex_lock(&clk_dvfs_node->vd->mutex);
1294 table = clk_dvfs_node->dvfs_table;
1295 mutex_unlock(&clk_dvfs_node->vd->mutex);
1299 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
/*
 * Public API: replace the node's freq/volt table, then re-derive the rate
 * range and round both rates (to clock-achievable values) and voltages
 * (to regulator-supported steps) in place — all under the vd mutex.
 * The table is used by reference; the caller must keep it alive.
 */
1301 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1306 if (IS_ERR_OR_NULL(table)){
1307 DVFS_ERR("%s:invalid table!\n", __func__);
1311 mutex_lock(&clk_dvfs_node->vd->mutex);
1312 clk_dvfs_node->dvfs_table = table;
1313 dvfs_get_rate_range(clk_dvfs_node);
1314 dvfs_table_round_clk_rate(clk_dvfs_node);
1315 dvfs_table_round_volt(clk_dvfs_node);
1316 mutex_unlock(&clk_dvfs_node->vd->mutex);
1320 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
/*
 * Compute a voltage trim from the chip's measured leakage: compare the
 * fused leakage against the table's reference (def_table_lkg) and walk
 * lkg_info.table to pick a delta — positive (add dlt_volt) when leakage
 * is at or below the reference band, negative when above. Returns 0 when
 * no reference is set or the leakage fuse is blank (0 or 0xff).
 * NOTE(review): break statements and the final return are missing from
 * this excerpt.
 */
1322 static int get_adjust_volt_by_leakage(struct dvfs_node *dvfs_node)
1325 int delta_leakage = 0;
1327 int adjust_volt = 0;
1332 if (dvfs_node->lkg_info.def_table_lkg == -1)
1335 leakage = rockchip_get_leakage(dvfs_node->channel);
1336 if (!leakage || (leakage == 0xff))
1339 delta_leakage = leakage - dvfs_node->lkg_info.def_table_lkg;
1340 if (delta_leakage <= 0) {
1341 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1342 CPUFREQ_TABLE_END); i++) {
1343 if (leakage > dvfs_node->lkg_info.table[i].lkg) {
1345 dvfs_node->lkg_info.table[i].dlt_volt;
1350 } else if (delta_leakage > 0) {
1351 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1352 CPUFREQ_TABLE_END); i++) {
1353 if (leakage <= dvfs_node->lkg_info.table[i].lkg) {
1355 -dvfs_node->lkg_info.table[i].dlt_volt;
/*
 * adjust_table_by_leakage - apply the leakage-derived voltage delta to every
 * dvfs table entry at or above min_adjust_freq.
 * No-ops when there is no table or min_adjust_freq is unset (-1).
 */
1363 static void adjust_table_by_leakage(struct dvfs_node *dvfs_node)
1365 int i, adjust_volt = get_adjust_volt_by_leakage(dvfs_node);
1370 if (!dvfs_node->dvfs_table)
1373 if (dvfs_node->lkg_info.min_adjust_freq == -1)
1377 (dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1378 if (dvfs_node->dvfs_table[i].frequency >=
1379 dvfs_node->lkg_info.min_adjust_freq)
/* .index holds the voltage for this frequency entry. */
1380 dvfs_node->dvfs_table[i].index += adjust_volt;
/*
 * clk_enable_dvfs - enable DVFS management for a clock node.
 *
 * On the first enable (enable_count == 0) this lazily acquires the voltage
 * domain's regulator, prepares the freq/volt table (rounding, optional
 * leakage adjustment, optional PVTM-derived table), snapshots the current
 * clock rate, looks up its reference voltage and, if needed, raises the
 * domain voltage before registering the clk notifier. Subsequent calls just
 * bump the reference count. Also validates/applies regulator-mode tables
 * when regu_mode_en is set.
 *
 * NOTE(review): many error-return lines are elided in this excerpt; the
 * visible mutex_unlock calls mark the error exits.
 */
1384 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1386 struct cpufreq_frequency_table clk_fv;
1394 DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n",
1395 __func__, __clk_get_name(clk_dvfs_node->clk));
/* A node must belong to a voltage domain to be managed. */
1397 if (!clk_dvfs_node->vd) {
1398 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n",
1399 __func__, clk_dvfs_node->name);
1402 mutex_lock(&clk_dvfs_node->vd->mutex);
1403 if (clk_dvfs_node->enable_count == 0) {
/* First enable: fetch the vd regulator lazily by name. */
1404 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1405 if (clk_dvfs_node->vd->regulator_name)
1406 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1407 if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1408 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1409 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1410 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1411 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1412 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
/* Regulator lookup failed: abort enable, leave count at 0. */
1414 clk_dvfs_node->enable_count = 0;
1415 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n",
1416 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1417 mutex_unlock(&clk_dvfs_node->vd->mutex);
1421 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1424 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1425 __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
/* Prepare the table: round rates, derive limits, optional tweaks. */
1427 dvfs_table_round_clk_rate(clk_dvfs_node);
1428 dvfs_get_rate_range(clk_dvfs_node);
1429 clk_dvfs_node->freq_limit_en = 1;
1430 if (clk_dvfs_node->lkg_adjust_volt_en)
1431 adjust_table_by_leakage(clk_dvfs_node);
1432 if (clk_dvfs_node->support_pvtm)
1433 pvtm_set_dvfs_table(clk_dvfs_node);
1434 dvfs_table_round_volt(clk_dvfs_node);
/* Snapshot the current hardware rate (kHz) as the starting point. */
1435 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1436 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1438 DVFS_DBG("%s: %s get freq %u!\n",
1439 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
/* Look up the reference voltage for the current frequency. */
1441 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1442 if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1443 DVFS_ERR("%s: table empty\n", __func__);
1444 clk_dvfs_node->enable_count = 0;
1445 mutex_unlock(&clk_dvfs_node->vd->mutex);
/* Current rate above every table entry: keep defaults, still enable. */
1448 DVFS_WARNING("%s: clk(%s) freq table all value are smaller than default(%d), use default, just enable dvfs\n",
1449 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1450 clk_dvfs_node->enable_count++;
1451 mutex_unlock(&clk_dvfs_node->vd->mutex);
1455 clk_dvfs_node->enable_count++;
1456 clk_dvfs_node->set_volt = clk_fv.index;
1457 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1458 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1459 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1461 if (clk_dvfs_node->dvfs_nb) {
1462 // must unregister when clk disable
1463 clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
/* Raise the domain voltage if the new target differs from current. */
1466 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1467 ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1468 dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1470 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1471 clk_dvfs_node->enable_count = 0;
1472 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
1473 mutex_unlock(&clk_dvfs_node->vd->mutex);
1476 clk_dvfs_node->vd->cur_volt = volt_new;
1477 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/* Already enabled: just bump the reference count. */
1481 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1482 __func__, clk_dvfs_node->enable_count);
1483 clk_dvfs_node->enable_count++;
/* Optional regulator-mode handling: validate table, apply current mode. */
1486 if (clk_dvfs_node->regu_mode_en) {
1487 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1489 DVFS_ERR("%s: clk(%s) regu_mode_table is unvalid, set regu_mode_en=0!\n",
1490 __func__, clk_dvfs_node->name);
1491 clk_dvfs_node->regu_mode_en = 0;
1492 mutex_unlock(&clk_dvfs_node->vd->mutex);
1496 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1498 DVFS_ERR("%s: clk(%s) rate %dKhz get regu_mode fail\n",
1499 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1500 mutex_unlock(&clk_dvfs_node->vd->mutex);
1503 clk_dvfs_node->regu_mode = mode;
1505 dvfs_update_clk_pds_mode(clk_dvfs_node);
1508 mutex_unlock(&clk_dvfs_node->vd->mutex);
1512 EXPORT_SYMBOL(clk_enable_dvfs);
/*
 * clk_disable_dvfs - drop one DVFS reference on the node.
 * When the count reaches zero, re-evaluate and apply the domain voltage
 * without this clock's request, and unregister its clk notifier
 * (registered in clk_enable_dvfs). Warns if already fully disabled.
 */
1514 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1521 DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n",
1522 __func__, __clk_get_name(clk_dvfs_node->clk));
1524 mutex_lock(&clk_dvfs_node->vd->mutex);
1525 if (!clk_dvfs_node->enable_count) {
1526 DVFS_WARNING("%s:clk(%s) is already closed!\n",
1527 __func__, __clk_get_name(clk_dvfs_node->clk));
1528 mutex_unlock(&clk_dvfs_node->vd->mutex);
1531 clk_dvfs_node->enable_count--;
1532 if (0 == clk_dvfs_node->enable_count) {
1533 DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1534 __func__, __clk_get_name(clk_dvfs_node->clk));
/* Re-aggregate domain voltage now that this clk no longer votes. */
1535 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1536 dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1539 clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1540 DVFS_DBG("clk unregister nb!\n");
1544 mutex_unlock(&clk_dvfs_node->vd->mutex);
1547 EXPORT_SYMBOL(clk_disable_dvfs);
/*
 * dvfs_get_limit_rate - clamp a requested rate to the node's limits.
 * Applies [min_rate, max_rate] when freq_limit_en is set, then further caps
 * by temp_limit_rate when temperature limiting is active.
 */
1549 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1551 unsigned long limit_rate;
1554 if (clk_dvfs_node->freq_limit_en) {
1556 if (rate < clk_dvfs_node->min_rate) {
1557 limit_rate = clk_dvfs_node->min_rate;
1558 } else if (rate > clk_dvfs_node->max_rate) {
1559 limit_rate = clk_dvfs_node->max_rate;
/* Thermal cap takes precedence over the requested/clamped rate. */
1561 if (temp_limit_enable) {
1562 if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1563 limit_rate = clk_dvfs_node->temp_limit_rate;
1568 DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
/*
 * dvfs_target - core voltage/frequency transition for a dvfs node.
 *
 * Sequence: recover from a previously failed voltage set, clamp the request,
 * round it through the clk framework, look up the reference voltage, then
 * order the steps safely: voltage up BEFORE raising the rate, voltage down
 * AFTER lowering it. On clk set-rate failure the stored voltage request is
 * rolled back (fail_roll_back path). Caller holds the vd mutex
 * (vd_dvfs_target is invoked under it in dvfs_clk_set_rate).
 */
1573 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1575 struct cpufreq_frequency_table clk_fv;
1576 unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1577 struct clk *clk = clk_dvfs_node->clk;
/* Ignore requests while DVFS is disabled for this node. */
1583 if (!clk_dvfs_node->enable_count)
1586 if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1587 /* It means the last time set voltage error */
1588 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1594 rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1595 new_rate = __clk_round_rate(clk, rate);
1596 old_rate = __clk_get_rate(clk);
/* Nothing to do if the rounded target equals the current rate. */
1597 if (new_rate == old_rate)
1600 DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1601 __func__, clk_dvfs_node->name, rate, old_rate);
1603 /* find the clk corresponding voltage */
1604 ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1606 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
1607 __func__, clk_dvfs_node->name, new_rate);
/* Keep the old voltage request so it can be restored on failure. */
1610 clk_volt_store = clk_dvfs_node->set_volt;
1611 clk_dvfs_node->set_volt = clk_fv.index;
1612 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1613 DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1614 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1617 /* if up the rate */
1618 if (new_rate > old_rate) {
1619 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1621 DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1622 __func__, clk_dvfs_node->name, new_rate);
/* Raise voltage first when scaling up. */
1624 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1626 goto fail_roll_back;
1630 if (clk_dvfs_node->clk_dvfs_target) {
/* Node-specific set-rate hook (e.g. for CPU) takes priority. */
1631 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1633 ret = clk_set_rate(clk, rate);
1637 DVFS_ERR("%s:clk(%s) set rate err\n",
1638 __func__, __clk_get_name(clk));
1639 goto fail_roll_back;
1641 clk_dvfs_node->set_freq = new_rate / 1000;
1643 DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
1644 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1646 /* if down the rate */
1647 if (new_rate < old_rate) {
/* Lower voltage only after the rate has dropped. */
1648 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1652 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1654 DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1655 __func__, clk_dvfs_node->name, new_rate);
/* fail_roll_back: restore the pre-transition voltage request. */
1660 clk_dvfs_node->set_volt = clk_volt_store;
/* dvfs_clk_round_rate - thin wrapper over __clk_round_rate for the node's clk. */
1665 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1667 return __clk_round_rate(clk_dvfs_node->clk, rate);
1669 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
/* dvfs_clk_get_rate - current hardware rate of the node's clk (Hz). */
1671 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1673 return __clk_get_rate(clk_dvfs_node->clk);
1675 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
/*
 * dvfs_clk_get_last_set_rate - last rate requested via dvfs_clk_set_rate
 * (not necessarily the achieved hardware rate), read under the vd mutex.
 */
1677 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1679 unsigned long last_set_rate;
1681 mutex_lock(&clk_dvfs_node->vd->mutex);
1682 last_set_rate = clk_dvfs_node->last_set_rate;
1683 mutex_unlock(&clk_dvfs_node->vd->mutex);
1685 return last_set_rate;
1687 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
/* dvfs_clk_enable - pass-through to clk_enable for the node's clk. */
1690 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1692 return clk_enable(clk_dvfs_node->clk);
1694 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
/* dvfs_clk_disable - pass-through to clk_disable for the node's clk. */
1696 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1698 return clk_disable(clk_dvfs_node->clk);
1700 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
/*
 * clk_get_dvfs_node - find a dvfs node by clock name.
 * Walks the vd -> pd -> clk tree under rk_dvfs_mutex (and each vd's mutex),
 * returning the first name match; NULL-equivalent fallthrough when absent
 * (return elided in this excerpt).
 */
1702 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1706 struct dvfs_node *clk_dvfs_node;
1708 mutex_lock(&rk_dvfs_mutex);
1709 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1710 mutex_lock(&vd->mutex);
1711 list_for_each_entry(pd, &vd->pd_list, node) {
1712 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1713 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
/* Drop both locks before handing the node back to the caller. */
1714 mutex_unlock(&vd->mutex);
1715 mutex_unlock(&rk_dvfs_mutex);
1716 return clk_dvfs_node;
1720 mutex_unlock(&vd->mutex);
1722 mutex_unlock(&rk_dvfs_mutex);
1726 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
/* clk_put_dvfs_node - counterpart of clk_get_dvfs_node; body is empty
 * (nodes are never freed), kept for API symmetry. */
1728 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1732 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
/* dvfs_clk_prepare_enable - pass-through to clk_prepare_enable. */
1734 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1736 return clk_prepare_enable(clk_dvfs_node->clk);
1738 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
/* dvfs_clk_disable_unprepare - pass-through to clk_disable_unprepare. */
1741 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1743 clk_disable_unprepare(clk_dvfs_node->clk);
1745 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
/*
 * dvfs_clk_set_rate - public entry point for DVFS-coordinated rate changes.
 * Dispatches to the voltage domain's vd_dvfs_target (dvfs_target) under the
 * vd mutex and records the request in last_set_rate; errors out when the
 * node has no vd or target callback.
 */
1747 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1754 DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n",
1755 __func__, clk_dvfs_node->name, rate);
1757 #if 0 // judge by reference func in rk
1758 if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1759 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1764 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1765 mutex_lock(&clk_dvfs_node->vd->mutex);
1766 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
/* Recorded even if the target call failed — TODO confirm intended. */
1767 clk_dvfs_node->last_set_rate = rate;
1768 mutex_unlock(&clk_dvfs_node->vd->mutex);
1770 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
1771 __func__, clk_dvfs_node->name);
1776 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
/*
 * rk_regist_vd - add a voltage-domain node to the global dvfs tree.
 * Initializes its pd list and links it under rk_dvfs_mutex.
 */
1779 int rk_regist_vd(struct vd_node *vd)
1785 vd->volt_time_flag=0;
1787 INIT_LIST_HEAD(&vd->pd_list);
1788 mutex_lock(&rk_dvfs_mutex);
1789 list_add(&vd->node, &rk_dvfs_tree);
1790 mutex_unlock(&rk_dvfs_mutex);
1794 EXPORT_SYMBOL_GPL(rk_regist_vd);
/*
 * rk_regist_pd - add a power-domain node under its parent voltage domain.
 * Initializes its clk list and links it under the vd mutex.
 */
1796 int rk_regist_pd(struct pd_node *pd)
1807 INIT_LIST_HEAD(&pd->clk_list);
1808 mutex_lock(&vd->mutex);
1809 list_add(&pd->node, &vd->pd_list);
1810 mutex_unlock(&vd->mutex);
1814 EXPORT_SYMBOL_GPL(rk_regist_pd);
/*
 * rk_regist_clk - attach a clock dvfs node to its power domain's clk list,
 * serialized by the owning voltage domain's mutex.
 */
1816 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1824 vd = clk_dvfs_node->vd;
1825 pd = clk_dvfs_node->pd;
1829 mutex_lock(&vd->mutex);
1830 list_add(&clk_dvfs_node->node, &pd->clk_list);
1831 mutex_unlock(&vd->mutex);
1835 EXPORT_SYMBOL_GPL(rk_regist_clk);
/*
 * of_get_temp_limit_table - parse a DT property of (temp, freq-kHz) u32
 * pairs into a kzalloc'd cpufreq table (index = temp, frequency = freq*1000),
 * terminated by CPUFREQ_TABLE_END. Returns NULL-equivalent on missing or
 * odd-length properties (early returns elided in this excerpt).
 * Caller owns the returned allocation.
 */
1837 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1839 struct cpufreq_frequency_table *temp_limt_table = NULL;
1840 const struct property *prop;
1844 prop = of_find_property(dev_node, propname, NULL);
1850 nr = prop->length / sizeof(u32);
/* Pairs required: an odd count is an invalid list. */
1852 pr_err("%s: Invalid freq list\n", __func__);
1856 temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1857 (nr/2 + 1), GFP_KERNEL);
1861 for (i=0; i<nr/2; i++){
1862 temp_limt_table[i].index = be32_to_cpup(val++);
1863 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
/* Sentinel entry terminates the table. */
1866 temp_limt_table[i].index = 0;
1867 temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1869 return temp_limt_table;
/*
 * of_get_dvfs_table - parse the "operating-points" DT property into a
 * kzalloc'd cpufreq table of (frequency, voltage) pairs, CPUFREQ_TABLE_END
 * terminated, returned through @dvfs_table. Error returns elided in this
 * excerpt. Caller owns the allocation.
 */
1873 static int of_get_dvfs_table(struct device_node *dev_node,
1874 struct cpufreq_frequency_table **dvfs_table)
1876 struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1877 const struct property *prop;
1881 prop = of_find_property(dev_node, "operating-points", NULL);
1887 nr = prop->length / sizeof(u32);
1889 pr_err("%s: Invalid freq list\n", __func__);
1893 tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1894 (nr/2 + 1), GFP_KERNEL);
1897 for (i = 0; i < nr/2; i++) {
1898 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
/* .index carries the voltage for this operating point. */
1899 tmp_dvfs_table[i].index = be32_to_cpup(val++);
1902 tmp_dvfs_table[i].index = 0;
1903 tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1905 *dvfs_table = tmp_dvfs_table;
/*
 * of_get_dvfs_pvtm_table - parse "pvtm-operating-points" triples
 * (freq, volt, pvtm) into two parallel kzalloc'd tables: a dvfs table
 * (freq, volt) and a pvtm table (freq, pvtm), both CPUFREQ_TABLE_END
 * terminated. Error returns elided in this excerpt; caller owns both
 * allocations.
 */
1911 static int of_get_dvfs_pvtm_table(struct device_node *dev_node,
1912 struct cpufreq_frequency_table **dvfs_table,
1913 struct cpufreq_frequency_table **pvtm_table)
1915 struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1916 struct cpufreq_frequency_table *tmp_pvtm_table = NULL;
1917 const struct property *prop;
1921 prop = of_find_property(dev_node, "pvtm-operating-points", NULL);
1927 nr = prop->length / sizeof(u32);
/* Entries come in triples; anything else is malformed. */
1929 pr_err("%s: Invalid freq list\n", __func__);
1933 tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1934 (nr/3 + 1), GFP_KERNEL);
1936 tmp_pvtm_table = kzalloc(sizeof(*tmp_pvtm_table) *
1937 (nr/3 + 1), GFP_KERNEL);
1941 for (i = 0; i < nr/3; i++) {
1942 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
1943 tmp_dvfs_table[i].index = be32_to_cpup(val++);
/* pvtm table shares the frequency axis with the dvfs table. */
1945 tmp_pvtm_table[i].frequency = tmp_dvfs_table[i].frequency;
1946 tmp_pvtm_table[i].index = be32_to_cpup(val++);
1949 tmp_dvfs_table[i].index = 0;
1950 tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1952 tmp_pvtm_table[i].index = 0;
1953 tmp_pvtm_table[i].frequency = CPUFREQ_TABLE_END;
1955 *dvfs_table = tmp_dvfs_table;
1956 *pvtm_table = tmp_pvtm_table;
/*
 * of_get_lkg_adjust_volt_table - parse a DT property of (leakage, delta-volt)
 * s32 pairs into a kzalloc'd lkg_adjust_volt_table, terminated by an entry
 * with dlt_volt == CPUFREQ_TABLE_END. Early error returns elided in this
 * excerpt; caller owns the allocation.
 */
1961 static struct lkg_adjust_volt_table
1962 *of_get_lkg_adjust_volt_table(struct device_node *np,
1963 const char *propname)
1965 struct lkg_adjust_volt_table *lkg_adjust_volt_table = NULL;
1966 const struct property *prop;
1970 prop = of_find_property(np, propname, NULL);
1976 nr = prop->length / sizeof(s32);
1978 pr_err("%s: Invalid freq list\n", __func__);
1982 lkg_adjust_volt_table =
1983 kzalloc(sizeof(struct lkg_adjust_volt_table) *
1984 (nr/2 + 1), GFP_KERNEL);
1988 for (i = 0; i < nr/2; i++) {
1989 lkg_adjust_volt_table[i].lkg = be32_to_cpup(val++);
1990 lkg_adjust_volt_table[i].dlt_volt = be32_to_cpup(val++);
/* Sentinel: dlt_volt doubles as the end-of-table marker. */
1993 lkg_adjust_volt_table[i].lkg = 0;
1994 lkg_adjust_volt_table[i].dlt_volt = CPUFREQ_TABLE_END;
1996 return lkg_adjust_volt_table;
/*
 * dvfs_node_parse_dt - populate a dvfs_node from its device-tree node.
 *
 * Reads: leakage channel, optional regulator-mode table, temperature-limit
 * configuration (per-scenario limit tables), PVTM support (with per-chip
 * pvtm_info matching on channel/process version/compatible string), the
 * freq/volt operating-point table, and optional leakage-based voltage
 * adjustment parameters. Note temp_limit_enable/target_temp are module-wide
 * globals, so the last node parsed wins for those.
 */
1999 static int dvfs_node_parse_dt(struct device_node *np,
2000 struct dvfs_node *dvfs_node)
2002 int process_version = rockchip_process_version();
2006 of_property_read_u32_index(np, "channel", 0, &dvfs_node->channel);
2008 pr_info("channel:%d, lkg:%d\n",
2009 dvfs_node->channel, rockchip_get_leakage(dvfs_node->channel));
2011 of_property_read_u32_index(np, "regu-mode-en", 0,
2012 &dvfs_node->regu_mode_en);
2013 if (dvfs_node->regu_mode_en)
2014 dvfs_node->regu_mode_table = of_get_regu_mode_table(np);
2016 dvfs_node->regu_mode_table = NULL;
/* Temperature limiting: one table per load scenario. */
2018 of_property_read_u32_index(np, "temp-limit-enable", 0,
2019 &temp_limit_enable);
2020 if (temp_limit_enable) {
2021 of_property_read_u32_index(np, "target-temp", 0, &target_temp);
2022 pr_info("target-temp:%d\n", target_temp);
2023 dvfs_node->nor_temp_limit_table =
2024 of_get_temp_limit_table(np,
2025 "normal-temp-limit");
2026 dvfs_node->per_temp_limit_table =
2027 of_get_temp_limit_table(np,
2028 "performance-temp-limit");
/* Virtual-temp tables indexed by number of busy CPUs (1..4). */
2029 dvfs_node->virt_temp_limit_table[0] =
2030 of_get_temp_limit_table(np,
2031 "virt-temp-limit-1-cpu-busy");
2032 dvfs_node->virt_temp_limit_table[1] =
2033 of_get_temp_limit_table(np,
2034 "virt-temp-limit-2-cpu-busy");
2035 dvfs_node->virt_temp_limit_table[2] =
2036 of_get_temp_limit_table(np,
2037 "virt-temp-limit-3-cpu-busy");
2038 dvfs_node->virt_temp_limit_table[3] =
2039 of_get_temp_limit_table(np,
2040 "virt-temp-limit-4-cpu-busy");
/* -1 means "no thermal cap currently applied". */
2042 dvfs_node->temp_limit_rate = -1;
2044 ret = of_property_read_u32_index(np, "support-pvtm", 0,
2045 &dvfs_node->support_pvtm);
2047 if (of_get_dvfs_pvtm_table(np, &dvfs_node->dvfs_table,
2048 &dvfs_node->pvtm_table))
/* Match pvtm_info by channel + silicon process + machine compatible. */
2051 for (i = 0; i < ARRAY_SIZE(pvtm_info_table); i++) {
2052 struct pvtm_info *pvtm_info = pvtm_info_table[i];
2054 if ((pvtm_info->channel == dvfs_node->channel) &&
2055 (pvtm_info->process_version == process_version) &&
2056 of_machine_is_compatible(pvtm_info->compatible)) {
2057 dvfs_node->pvtm_info = pvtm_info;
2062 if (!dvfs_node->pvtm_info)
2063 dvfs_node->support_pvtm = 0;
2065 if (of_get_dvfs_table(np, &dvfs_node->dvfs_table))
2069 of_property_read_u32_index(np, "lkg_adjust_volt_en", 0,
2070 &dvfs_node->lkg_adjust_volt_en);
2071 if (dvfs_node->lkg_adjust_volt_en) {
2072 dvfs_node->lkg_info.def_table_lkg = -1;
2073 of_property_read_u32_index(np, "def_table_lkg", 0,
2074 &dvfs_node->lkg_info.def_table_lkg);
2076 dvfs_node->lkg_info.min_adjust_freq = -1;
2077 of_property_read_u32_index(np, "min_adjust_freq", 0,
2078 &dvfs_node->lkg_info.min_adjust_freq
2081 dvfs_node->lkg_info.table =
2082 of_get_lkg_adjust_volt_table(np,
2083 "lkg_adjust_volt_table");
/*
 * of_dvfs_init - build the vd/pd/clk dvfs tree from the "dvfs" DT node.
 *
 * For each voltage-domain child: allocate a vd_node, read its regulator
 * name, register it; for each power-domain grandchild: allocate/register a
 * pd_node; for each clock great-grandchild: allocate a dvfs_node, parse its
 * DT properties, resolve its struct clk by name, and register it. Runs at
 * arch_initcall_sync time. Error-cleanup lines are elided in this excerpt.
 */
2089 int of_dvfs_init(void)
2093 struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
2094 struct dvfs_node *dvfs_node;
2098 DVFS_DBG("%s\n", __func__);
2099 pr_info("process version: %d\n", rockchip_process_version());
2101 dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
2102 if (IS_ERR_OR_NULL(dvfs_dev_node)) {
2103 DVFS_ERR("%s get dvfs dev node err\n", __func__);
2104 return PTR_ERR(dvfs_dev_node);
2107 for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
2108 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
2112 mutex_init(&vd->mutex);
2113 vd->name = vd_dev_node->name;
2114 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
2116 DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n",
2117 __func__, vd_dev_node->name, ret);
2122 vd->suspend_volt = 0;
/* Marked FAILURE until the first successful voltage set. */
2124 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
2125 vd->vd_dvfs_target = dvfs_target;
2126 ret = rk_regist_vd(vd);
2128 DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
2133 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n",
2134 __func__, vd->name, vd->regulator_name, vd->suspend_volt);
2136 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {
2137 pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
2142 pd->name = pd_dev_node->name;
2144 ret = rk_regist_pd(pd);
2146 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
2150 DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
2151 __func__, pd->name, vd->name);
2152 for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
2153 if (!of_device_is_available(clk_dev_node))
2156 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
2160 dvfs_node->name = clk_dev_node->name;
2164 if (dvfs_node_parse_dt(clk_dev_node, dvfs_node))
/* Clock is looked up by the DT node's name. */
2167 clk = clk_get(NULL, clk_dev_node->name);
2169 DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
2175 dvfs_node->clk = clk;
2176 ret = rk_regist_clk(dvfs_node);
2178 DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
2182 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n",
2183 __func__, clk_dev_node->name, pd->name);
2192 arch_initcall_sync(of_dvfs_init);
2195 /*********************************************************************************/
2197 * dump_dbg_map() : Draw all informations of dvfs while debug
2199 static int dump_dbg_map(char *buf)
2204 struct dvfs_node *clk_dvfs_node;
2207 mutex_lock(&rk_dvfs_mutex);
2208 printk( "-------------DVFS TREE-----------\n\n\n");
2209 printk( "DVFS TREE:\n");
2211 list_for_each_entry(vd, &rk_dvfs_tree, node) {
2212 mutex_lock(&vd->mutex);
2213 printk( "|\n|- voltage domain:%s\n", vd->name);
2214 printk( "|- current voltage:%d\n", vd->cur_volt);
2215 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
2217 list_for_each_entry(pd, &vd->pd_list, node) {
2218 printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
2219 pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
2220 dvfs_regu_mode_to_string(pd->regu_mode));
2222 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
2223 printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
2224 " enable_dvfs = %s\n",
2225 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
2226 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
2227 printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
2228 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
2229 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
2230 clk_dvfs_node->last_set_rate/1000);
2231 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2232 printk( "| | | |- freq = %d, volt = %d\n",
2233 clk_dvfs_node->dvfs_table[i].frequency,
2234 clk_dvfs_node->dvfs_table[i].index);
2237 printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
2238 " regu_mode_en = %d\n",
2239 clk_dvfs_node->name, clk_dvfs_node->set_freq,
2240 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
2241 clk_dvfs_node->regu_mode_en);
2242 if (clk_dvfs_node->regu_mode_table) {
2243 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2244 printk( "| | | |- freq = %d, regu_mode = %s\n",
2245 clk_dvfs_node->regu_mode_table[i].frequency/1000,
2246 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
2251 mutex_unlock(&vd->mutex);
2254 printk( "-------------DVFS TREE END------------\n");
2255 mutex_unlock(&rk_dvfs_mutex);
2260 /*********************************************************************************/
/* sysfs anchor (/sys/dvfs) and the attribute wrapper used for its files. */
2261 static struct kobject *dvfs_kobj;
2262 struct dvfs_attribute {
2263 struct attribute attr;
2264 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
2266 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
2267 const char *buf, size_t n);
/* dvfs_tree store handler: writes are accepted but ignored (no-op). */
2270 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
2271 const char *buf, size_t n)
/* dvfs_tree show handler: dumps the whole DVFS tree via dump_dbg_map(). */
2275 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
2278 return dump_dbg_map(buf);
/* sysfs attribute table: currently only the read-mostly dvfs_tree file. */
2282 static struct dvfs_attribute dvfs_attrs[] = {
2283 /* node_name permision show_func store_func */
2284 //#ifdef CONFIG_RK_CLOCK_PROC
2285 __ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR, dvfs_tree_show, dvfs_tree_store),
/*
 * dvfs_init - late_initcall setup:
 * 1) create /sys/dvfs and its attribute files;
 * 2) if temperature limiting was enabled by DT, resolve clk_core's node,
 *    seed its temp_limit_rate from max_rate and start the temp-limit
 *    worker on CPU0 via a dedicated high-priority freezable workqueue;
 * 3) if a "vdd_gpu" regulator exists, hook the pd_gpu clock-power-domain
 *    notifier plus fb (early-suspend) and reboot notifiers so the GPU
 *    rail is managed/re-enabled appropriately.
 */
2289 static int __init dvfs_init(void)
2293 dvfs_kobj = kobject_create_and_add("dvfs", NULL);
2296 for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
2297 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
2299 DVFS_ERR("create index %d error\n", i);
2304 if (temp_limit_enable) {
2305 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
2306 if (!clk_cpu_dvfs_node){
/* Start uncapped: limit equals the node's max rate. */
2310 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
2311 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
2312 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
2315 vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
2316 if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
2317 struct clk *clk = clk_get(NULL, "pd_gpu");
2320 rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
2322 fb_register_client(&early_suspend_notifier);
2323 register_reboot_notifier(&vdd_gpu_reboot_notifier);
2329 late_initcall(dvfs_init);