1 /* arch/arm/mach-rk30/rk30_dvfs.c
3 * Copyright (C) 2012 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
25 #include <linux/reboot.h>
26 #include "../../../drivers/clk/rockchip/clk-pd.h"
/* Read the SoC temperature for tsadc channel @chn (provided by the tsadc
 * driver).  NOTE(review): units assumed to be degrees C — confirm against
 * rockchip_tsadc_get_temp()'s definition. */
28 extern int rockchip_tsadc_get_temp(int chn);
30 #define MHz (1000 * 1000)
/* List of all voltage-domain (vd) nodes; traversals are guarded by
 * rk_dvfs_mutex. */
31 static LIST_HEAD(rk_dvfs_tree);
32 static DEFINE_MUTEX(rk_dvfs_mutex);
/* Workqueue that runs the periodic temperature-limit work item. */
33 static struct workqueue_struct *dvfs_wq;
34 static struct dvfs_node *clk_cpu_dvfs_node;
/* Target die temperature for the NORMAL-policy throttling loop (see
 * dvfs_temp_limit_work_func); compared directly against tsadc readings. */
35 static unsigned int target_temp = 80;
36 static int temp_limit_enable = 1;
38 static int pd_gpu_off, early_suspend;
/* Serializes vdd_gpu enable/disable between the reboot, clk-pd and fb
 * notifiers below. */
39 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
40 struct regulator *vdd_gpu_regulator;
/*
 * Reboot notifier: force vdd_gpu on before reboot so the GPU rail is in a
 * known state for the next boot.  Serialized against the clk-pd and fb
 * notifiers via switch_vdd_gpu_mutex.
 */
42 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
43 unsigned long event, void *ptr)
47 DVFS_DBG("%s: enable vdd_gpu\n", __func__);
48 mutex_lock(&switch_vdd_gpu_mutex);
49 if (!regulator_is_enabled(vdd_gpu_regulator))
50 ret = regulator_enable(vdd_gpu_regulator);
51 mutex_unlock(&switch_vdd_gpu_mutex);
56 static struct notifier_block vdd_gpu_reboot_notifier = {
57 .notifier_call = vdd_gpu_reboot_notifier_event,
/*
 * GPU power-domain notifier: bring vdd_gpu up before the power domain is
 * prepared, and drop it after unprepare, so the rail is only powered while
 * the GPU domain is in use.
 * NOTE(review): the break/return between the two cases is not visible in
 * this view — confirm RK_CLK_PD_PREPARE does not fall through.
 */
60 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
61 unsigned long event, void *ptr)
66 case RK_CLK_PD_PREPARE:
67 mutex_lock(&switch_vdd_gpu_mutex);
70 if (!regulator_is_enabled(vdd_gpu_regulator))
71 ret = regulator_enable(vdd_gpu_regulator);
73 mutex_unlock(&switch_vdd_gpu_mutex);
75 case RK_CLK_PD_UNPREPARE:
76 mutex_lock(&switch_vdd_gpu_mutex);
79 if (regulator_is_enabled(vdd_gpu_regulator))
80 ret = regulator_disable(vdd_gpu_regulator);
82 mutex_unlock(&switch_vdd_gpu_mutex);
91 static struct notifier_block clk_pd_gpu_notifier = {
92 .notifier_call = clk_pd_gpu_notifier_call,
/*
 * Framebuffer blank notifier: power vdd_gpu up on the early unblank event
 * and down on blank/powerdown, mirroring early-suspend behaviour.
 */
96 static int early_suspend_notifier_call(struct notifier_block *self,
97 unsigned long action, void *data)
99 struct fb_event *event = data;
100 int blank_mode = *((int *)event->data);
103 mutex_lock(&switch_vdd_gpu_mutex);
104 if (action == FB_EARLY_EVENT_BLANK) {
105 switch (blank_mode) {
106 case FB_BLANK_UNBLANK:
/* screen coming back: make sure the GPU rail is on */
109 if (!regulator_is_enabled(vdd_gpu_regulator))
110 ret = regulator_enable(
117 } else if (action == FB_EVENT_BLANK) {
118 switch (blank_mode) {
119 case FB_BLANK_POWERDOWN:
/* screen fully off: drop the GPU rail */
122 if (regulator_is_enabled(vdd_gpu_regulator))
123 ret = regulator_disable(
132 mutex_unlock(&switch_vdd_gpu_mutex);
137 static struct notifier_block early_suspend_notifier = {
138 .notifier_call = early_suspend_notifier_call,
/*
 * DVFS regulator operating modes as encoded in the DT "regu-mode-table".
 * dvfs_regu_mode_convert()/deconvert() translate between these values and
 * the regulator framework's REGULATOR_MODE_* constants.
 */
141 #define DVFS_REGULATOR_MODE_STANDBY 1
142 #define DVFS_REGULATOR_MODE_IDLE 2
143 #define DVFS_REGULATOR_MODE_NORMAL 3
144 #define DVFS_REGULATOR_MODE_FAST 4
/* Human-readable name of a DVFS_REGULATOR_MODE_* value (for logs). */
146 static const char* dvfs_regu_mode_to_string(unsigned int mode)
149 case DVFS_REGULATOR_MODE_FAST:
151 case DVFS_REGULATOR_MODE_NORMAL:
153 case DVFS_REGULATOR_MODE_IDLE:
155 case DVFS_REGULATOR_MODE_STANDBY:
/*
 * Map a DVFS_REGULATOR_MODE_* value to the regulator framework's
 * REGULATOR_MODE_*.  Callers (see dvfs_regu_set_mode()) treat a negative
 * return as "invalid mode".
 */
162 static int dvfs_regu_mode_convert(unsigned int mode)
165 case DVFS_REGULATOR_MODE_FAST:
166 return REGULATOR_MODE_FAST;
167 case DVFS_REGULATOR_MODE_NORMAL:
168 return REGULATOR_MODE_NORMAL;
169 case DVFS_REGULATOR_MODE_IDLE:
170 return REGULATOR_MODE_IDLE;
171 case DVFS_REGULATOR_MODE_STANDBY:
172 return REGULATOR_MODE_STANDBY;
/* Inverse mapping: REGULATOR_MODE_* back to DVFS_REGULATOR_MODE_*. */
178 static int dvfs_regu_mode_deconvert(unsigned int mode)
181 case REGULATOR_MODE_FAST:
182 return DVFS_REGULATOR_MODE_FAST;
183 case REGULATOR_MODE_NORMAL:
184 return DVFS_REGULATOR_MODE_NORMAL;
185 case REGULATOR_MODE_IDLE:
186 return DVFS_REGULATOR_MODE_IDLE;
187 case REGULATOR_MODE_STANDBY:
188 return DVFS_REGULATOR_MODE_STANDBY;
/*
 * Parse the DT property "regu-mode-table" (pairs of <freq mode>) into a
 * cpufreq_frequency_table.  Each DT frequency value is multiplied by 1000
 * before being stored in .frequency; the mode goes into .index.  The last
 * pair in DT must have frequency 0 (checked below); the returned table is
 * terminated with CPUFREQ_TABLE_END.  Caller owns the allocation.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
194 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
196 struct cpufreq_frequency_table *regu_mode_table = NULL;
197 const struct property *prop;
201 prop = of_find_property(dev_node, "regu-mode-table", NULL);
207 nr = prop->length / sizeof(u32);
/* entries come in <freq mode> pairs, so the count must be even */
209 pr_err("%s: Invalid freq list\n", __func__);
213 regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
214 (nr/2+1), GFP_KERNEL);
215 if (!regu_mode_table) {
216 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
217 return ERR_PTR(-ENOMEM);
222 for (i=0; i<nr/2; i++){
223 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
224 regu_mode_table[i].index = be32_to_cpup(val++);
/* the DT table must end with a 0-frequency catch-all entry */
227 if (regu_mode_table[i-1].frequency != 0) {
228 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
229 kfree(regu_mode_table);
233 regu_mode_table[i].index = 0;
234 regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
236 return regu_mode_table;
/*
 * Validate clk_dvfs_node->regu_mode_table against what the regulator
 * actually supports: unsupported modes invalidate the whole table (it is
 * freed and cleared); supported-but-rounded modes are written back in
 * place.  No-op when there is no table, vd, or regulator.
 */
239 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
242 int mode, convert_mode, valid_mode;
247 if (!clk_dvfs_node->regu_mode_table)
250 if (!clk_dvfs_node->vd)
253 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
256 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
257 mode = clk_dvfs_node->regu_mode_table[i].index;
258 convert_mode = dvfs_regu_mode_convert(mode);
260 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
263 DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
265 kfree(clk_dvfs_node->regu_mode_table);
266 clk_dvfs_node->regu_mode_table = NULL;
/* regulator may have rounded the mode; keep the table consistent */
270 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
271 if (valid_mode != mode) {
272 DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
273 __func__, mode, valid_mode);
274 clk_dvfs_node->regu_mode_table[i].index = valid_mode;
/*
 * Look up the regulator mode for @rate in the node's regu_mode_table.
 * The first entry with frequency <= rate wins (NOTE(review): the loop's
 * exit after a match is not visible here — table is presumably sorted
 * descending; confirm).
 */
282 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
283 unsigned long rate, unsigned int *mode)
288 if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
291 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
292 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
293 *mode = clk_dvfs_node->regu_mode_table[i].index;
/*
 * New mode for power domain @pd: the max regu_mode over all mode-enabled
 * clocks in the pd.  Fast path: if the requesting clock's own mode already
 * dominates the pd's current mode, return it without scanning the list.
 */
301 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
303 unsigned int mode_max = 0;
306 if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
307 return clk_dvfs_node->regu_mode;
310 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
311 if (clk_dvfs_node->regu_mode_en)
312 mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
/* Recompute the regu_mode of the clock's power domain after a change. */
318 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
325 pd = clk_dvfs_node->pd;
329 pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
/* New mode for a voltage domain: max regu_mode over all of its pds. */
332 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
334 unsigned int mode_max_vd = 0;
340 list_for_each_entry(pd, &vd->pd_list, node) {
341 mode_max_vd = max(mode_max_vd, pd->regu_mode);
/* Propagate a clock's mode change up pd -> vd and return the vd mode. */
347 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
352 dvfs_update_clk_pds_mode(clk_dvfs_node);
354 return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
/*
 * Apply DVFS mode @mode to the voltage domain's regulator (after
 * converting to REGULATOR_MODE_*) and record it in vd->regu_mode.
 */
357 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
363 if (IS_ERR_OR_NULL(vd)) {
364 DVFS_ERR("%s: vd_node error\n", __func__);
368 DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
370 convert_mode = dvfs_regu_mode_convert(mode);
371 if (convert_mode < 0) {
372 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
376 if (!IS_ERR_OR_NULL(vd->regulator)) {
377 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
379 DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
380 vd->regulator_name, mode, vd->regu_mode);
384 DVFS_ERR("%s: invalid regulator\n", __func__);
388 vd->regu_mode = mode;
/*
 * Mode counterpart of dvfs_target(): pick the mode for @rate, record it on
 * the node, aggregate across the voltage domain and program the regulator.
 * No-op when regu_mode_en is off.
 */
393 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
402 if (!clk_dvfs_node->regu_mode_en)
405 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
407 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
408 __func__, clk_dvfs_node->name, rate);
411 clk_dvfs_node->regu_mode = mode;
413 mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
417 ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
/*
 * Busy-wait long enough for the regulator output to ramp when RAISING the
 * voltage (no delay needed when lowering).  Uses the regulator's own
 * set_voltage_time estimate when supported, otherwise falls back to
 * (delta_uV >> 9) us, i.e. roughly delta_uV / 512.
 */
422 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
426 if(new_volt <= old_volt)
428 if(vd->volt_time_flag > 0)
429 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
432 if(u_time < 0) {/* regulator does not report ramp time; use default estimate */
433 DVFS_DBG("%s:vd %s does not support reading the ramp delay, using default\n",
435 u_time = ((new_volt) - (old_volt)) >> 9;
438 DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n",
439 __func__, vd->name, old_volt, new_volt, u_time);
441 if (u_time >= 1000) {
442 mdelay(u_time / 1000);
443 udelay(u_time % 1000);
444 DVFS_WARNING("%s: regulator set vol delay is larger 1ms,old is %d,new is %d\n",
445 __func__, old_volt, new_volt);
/*
 * Set the regulator voltage; if the set call reports failure, read the
 * voltage back to tell "failed but actually took effect" from a real
 * failure.  NOTE(review): both bounds are passed as max_uV, so the min_uV
 * parameter is effectively unused — confirm this is intentional.
 */
451 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
453 int ret = 0, read_back = 0;
455 ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
457 DVFS_ERR("%s: now read back to check voltage\n", __func__);
459 /* read back to judge if it already took effect */
461 read_back = dvfs_regulator_get_voltage(regulator);
462 if (read_back == max_uV) {
463 DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
466 DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
/*
 * Program the voltage domain directly to @volt_new: set the regulator,
 * wait for the ramp, then update cur_volt/volt_set_flag bookkeeping.
 * On failure volt_set_flag is left at DVFS_SET_VOLT_FAILURE so the next
 * dvfs_target() call resyncs via dvfs_reset_volt().
 */
473 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
477 DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
479 if (IS_ERR_OR_NULL(vd_clk)) {
480 DVFS_ERR("%s: vd_node error\n", __func__);
484 if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
485 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
486 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
488 vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
489 DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
490 __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
495 DVFS_ERR("%s: invalid regulator\n", __func__);
499 vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
500 vd_clk->cur_volt = volt_new;
/*
 * After a failed voltage set, re-read the actual regulator output and
 * resync cur_volt so subsequent scaling starts from reality.  Returns the
 * refreshed cur_volt, or an error when the readback itself fails.
 */
506 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
508 int flag_set_volt_correct = 0;
509 if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
510 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
512 DVFS_ERR("%s: invalid regulator\n", __func__);
515 if (flag_set_volt_correct <= 0) {
516 DVFS_ERR("%s (vd:%s), try to reload volt ,by it is error again(%d)!!! stop scaling\n",
517 __func__, dvfs_vd->name, flag_set_volt_correct);
520 dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
521 DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
522 __func__, dvfs_vd->name, flag_set_volt_correct);
524 /* Reset vd's voltage */
525 dvfs_vd->cur_volt = flag_set_volt_correct;
527 return dvfs_vd->cur_volt;
/* On first clk-enable, snapshot the vd regulator's current voltage and set
 * the success/failure flag accordingly (<= 0 readback means failure). */
531 // for clk enable case to get vd regulator info
532 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
534 vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
535 if(vd->cur_volt <= 0){
536 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
538 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/*
 * Enumerate the regulator's selectable voltages into vd->volt_list
 * (capped at VD_VOL_LIST_CNT entries); non-positive entries are skipped,
 * so n may end up smaller than the selector count.
 */
541 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
543 unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
544 int n = 0, sel_volt = 0;
546 if(selector > VD_VOL_LIST_CNT)
547 selector = VD_VOL_LIST_CNT;
549 for (i = 0; i < selector; i++) {
550 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
552 //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
553 // __func__, vd->name, i, sel_volt);
556 vd->volt_list[n++] = sel_volt;
557 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n",
558 __func__, vd->name, i, n, sel_volt);
/* Round @volt to a supported list entry from above (smallest entry >= volt).
 * NOTE(review): the comparison/return lines are not visible here — verify
 * against vd_regulator_round_volt() semantics. */
565 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
570 for (i = 0; i < vd->n_voltages; i++) {
571 sel_volt = vd->volt_list[i];
573 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
/* Round @volt to a supported list entry from below (largest entry <= volt). */
584 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
589 for (i = 0; i < vd->n_voltages; i++) {
590 sel_volt = vd->volt_list[i];
592 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
598 return vd->volt_list[i-1];
/* Dispatch: VD_LIST_RELATION_L rounds down, otherwise round up. */
608 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
612 if(flags == VD_LIST_RELATION_L)
613 return vd_regulator_round_volt_min(vd, volt);
615 return vd_regulator_round_volt_max(vd, volt);
/*
 * Round every voltage in the node's dvfs_table (stored in .index) UP to a
 * value the regulator can actually produce, in place.  Skipped when the
 * table, vd or regulator is missing.
 */
618 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
622 if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd ||
623 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
626 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
628 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
631 DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
632 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
635 DVFS_DBG("clk %s:round_volt %d to %d\n",
636 clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
638 clk_dvfs_node->dvfs_table[i].index=test_volt;
/*
 * Probe whether the regulator reports voltage ramp time: ask for the delay
 * of a +200mV step and cache the result in vd->volt_time_flag (>0 means
 * supported; see dvfs_volt_up_delay()).
 */
642 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
644 if(vd->volt_time_flag <= 0){/* probe only until support is confirmed */
645 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
646 if(vd->volt_time_flag < 0){
647 DVFS_DBG("%s,vd %s volt_time is not supported\n",
651 DVFS_DBG("%s,vd %s volt_time is supported, up 200mv needs delay %d us\n",
652 __func__, vd->name, vd->volt_time_flag);
/*
 * Probe whether the regulator supports get/set of operating mode: read the
 * current mode, verify it is one of the four known modes, and confirm it
 * can be written back.  Cached in vd->mode_flag (0 = re-check next time).
 */
657 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
659 //REGULATOR_MODE_FAST
660 if(vd->mode_flag <= 0){/* probe only until support is confirmed */
661 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
662 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
663 || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
665 if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
666 vd->mode_flag = 0;// check again
669 if(vd->mode_flag > 0){
670 DVFS_DBG("%s,vd %s mode(now is %d) support\n",
671 __func__, vd->name, vd->mode_flag);
674 DVFS_DBG("%s,vd %s mode is not supported, will check again\n",
/*
 * Find a regulator by name among the registered voltage domains.
 * Returns the vd's regulator handle; ownership stays with the vd.
 */
681 struct regulator *dvfs_get_regulator(char *regulator_name)
685 mutex_lock(&rk_dvfs_mutex);
686 list_for_each_entry(vd, &rk_dvfs_tree, node) {
687 if (strcmp(regulator_name, vd->regulator_name) == 0) {
688 mutex_unlock(&rk_dvfs_mutex);
689 return vd->regulator;
692 mutex_unlock(&rk_dvfs_mutex);
/*
 * Derive min_rate/max_rate (Hz) from the node's dvfs_table.  Table
 * frequencies are in kHz; "/1000*1000*1000" truncates to a whole MHz and
 * converts to Hz (drops any flag digits in the low kHz positions).
 */
696 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
698 struct cpufreq_frequency_table *table;
704 clk_dvfs_node->min_rate = 0;
705 clk_dvfs_node->max_rate = 0;
707 table = clk_dvfs_node->dvfs_table;
708 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
709 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
711 clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
714 DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
715 __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * Round each dvfs_table frequency to what the clock can really produce.
 * Table entries are kHz with the low three decimal digits reserved as DDR
 * flags: strip the flags, round the real rate via __clk_round_rate() up to
 * a whole MHz, then re-attach the flags.
 */
720 static void dvfs_table_round_clk_rate(struct dvfs_node *clk_dvfs_node)
722 int i, rate, temp_rate, flags;
724 if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
727 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
728 //ddr rate = real rate + flags
729 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
730 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
731 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
733 DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
734 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
738 /* Set rate unit as MHZ */
739 if (temp_rate % MHz != 0)
740 temp_rate = (temp_rate / MHz + 1) * MHz;
742 temp_rate = (temp_rate / 1000) + flags;
744 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
745 clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
747 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;
/*
 * Find the reference voltage for @rate_khz: first dvfs_table entry with
 * frequency >= rate_khz (table assumed ascending) is copied into @clk_fv.
 * When no entry is large enough, clk_fv->frequency is set to 0 and an
 * error is returned (return lines not visible here).
 */
751 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
752 struct cpufreq_frequency_table *clk_fv)
756 if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
760 clk_fv->frequency = rate_khz;
763 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
764 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
765 clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
766 clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
767 //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
768 //clk_fv->frequency, clk_fv->index);
772 clk_fv->frequency = 0;
774 //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
/*
 * New voltage for power domain @pd: the max set_volt over all enabled
 * clocks in the pd, with a fast path when the requesting clock already
 * dominates the pd's current voltage.  Mirrors dvfs_pd_get_newmode_byclk().
 */
778 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
782 if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
783 return clk_dvfs_node->set_volt;
786 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
787 if (clk_dvfs_node->enable_count)
788 volt_max = max(volt_max, clk_dvfs_node->set_volt);
/* Recompute the clock's power-domain voltage after a set_volt change. */
793 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
800 pd = clk_dvfs_node->pd;
804 pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
/* New voltage for a voltage domain: max cur_volt over all of its pds. */
807 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
815 list_for_each_entry(pd, &vd->pd_list, node) {
816 volt_max_vd = max(volt_max_vd, pd->cur_volt);
/* Propagate a clock's voltage change up pd -> vd; returns the vd voltage. */
822 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
827 dvfs_update_clk_pds_volt(clk_dvfs_node);
828 return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
/*
 * Temperature-limit work, variant 1: every 100ms, walk every dvfs node
 * that has a temp_limit_table, refresh its temperature reading and
 * re-target it at last_set_rate so the limit is re-evaluated.
 * NOTE(review): two definitions of this function appear in this file —
 * presumably selected by an #ifdef between them (not visible here).
 */
832 static void dvfs_temp_limit_work_func(struct work_struct *work)
834 unsigned long delay = HZ / 10; // 100ms
837 struct dvfs_node *clk_dvfs_node;
/* re-arm ourselves first so the loop keeps running even on early exit */
839 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
841 mutex_lock(&rk_dvfs_mutex);
842 list_for_each_entry(vd, &rk_dvfs_tree, node) {
843 mutex_lock(&vd->mutex);
844 list_for_each_entry(pd, &vd->pd_list, node) {
845 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
846 if (clk_dvfs_node->temp_limit_table) {
847 clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
848 clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
852 mutex_unlock(&vd->mutex);
854 mutex_unlock(&rk_dvfs_mutex);
/*
 * Temperature-limit work, variant 2 (CPU only): every 100ms read tsadc
 * channel 1 and throttle the CPU clock.
 *  - PERFORMANCE policy: hard cap from per_temp_limit_table (absolute
 *    temperature thresholds).
 *  - NORMAL policy: incremental control toward target_temp; step the cap
 *    down while temperature rises above target, step it back up (bounded
 *    by max_rate) when below.
 * NOTE(review): the update of the static old_temp is not visible in this
 * view — confirm it is refreshed each pass.
 */
858 static void dvfs_temp_limit_work_func(struct work_struct *work)
860 int temp=0, delta_temp=0;
861 unsigned long delay = HZ/10;
862 unsigned long arm_rate_step=0;
863 static int old_temp=0;
866 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
868 temp = rockchip_tsadc_get_temp(1);
871 delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
875 if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
876 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
880 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
881 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
882 if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
883 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
886 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
887 } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
888 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
/* above target and still rising: step the cap down */
892 if (temp > target_temp) {
893 if (temp > old_temp) {
894 delta_temp = temp - target_temp;
895 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
896 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
897 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
900 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
901 clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
902 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
/* below target with a throttled cap: step the cap back up toward max */
906 if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
907 delta_temp = target_temp - temp;
908 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
909 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
910 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
915 clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
916 if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
917 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
919 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
925 DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
/* Periodic work item; first queued elsewhere, then self re-arming. */
929 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
/*
 * Enable an external [min_rate, max_rate] clamp on a dvfs clock.  The
 * requested bounds are first reset to the table range and then narrowed
 * only when they fall inside it; the clock is immediately re-targeted at
 * its last set rate so the clamp takes effect.
 */
932 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
934 u32 rate = 0, ret = 0;
936 if (!clk_dvfs_node || (min_rate > max_rate))
939 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
940 mutex_lock(&clk_dvfs_node->vd->mutex);
942 /* To reset clk_dvfs_node->min_rate/max_rate */
943 dvfs_get_rate_range(clk_dvfs_node);
944 clk_dvfs_node->freq_limit_en = 1;
946 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
947 clk_dvfs_node->min_rate = min_rate;
950 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
951 clk_dvfs_node->max_rate = max_rate;
/* never set before: fall back to the clock's current hardware rate */
954 if (clk_dvfs_node->last_set_rate == 0)
955 rate = __clk_get_rate(clk_dvfs_node->clk);
957 rate = clk_dvfs_node->last_set_rate;
958 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
960 mutex_unlock(&clk_dvfs_node->vd->mutex);
964 DVFS_DBG("%s:clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
965 __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate,
966 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
970 EXPORT_SYMBOL(dvfs_clk_enable_limit);
/*
 * Remove the external clamp: restore the full table range, clear the
 * enable flag and re-target the clock at its last set rate.
 */
972 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
979 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
980 mutex_lock(&clk_dvfs_node->vd->mutex);
982 /* To reset clk_dvfs_node->min_rate/max_rate */
983 dvfs_get_rate_range(clk_dvfs_node);
984 clk_dvfs_node->freq_limit_en = 0;
985 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
987 mutex_unlock(&clk_dvfs_node->vd->mutex);
990 DVFS_DBG("%s: clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
991 __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
994 EXPORT_SYMBOL(dvfs_clk_disable_limit);
/* Stop thermal throttling: clear the flag and cancel the periodic work. */
996 void dvfs_disable_temp_limit(void) {
997 temp_limit_enable = 0;
998 cancel_delayed_work_sync(&dvfs_temp_limit_work);
/*
 * Read back the current rate limits under the vd mutex.
 * Returns the freq_limit_en flag.
 */
1001 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
1008 mutex_lock(&clk_dvfs_node->vd->mutex);
1010 *min_rate = clk_dvfs_node->min_rate;
1011 *max_rate = clk_dvfs_node->max_rate;
1012 freq_limit_en = clk_dvfs_node->freq_limit_en;
1014 mutex_unlock(&clk_dvfs_node->vd->mutex);
1016 return freq_limit_en;
1018 EXPORT_SYMBOL(dvfs_clk_get_limit);
/*
 * Install a custom set-rate callback used by dvfs_target() in place of
 * clk_set_rate() for this clock.
 */
1020 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1025 mutex_lock(&clk_dvfs_node->vd->mutex);
1026 clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1027 mutex_unlock(&clk_dvfs_node->vd->mutex);
1031 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
/* Return the node's freq/volt table (read under the vd mutex; the table
 * itself remains owned by the node). */
1033 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node)
1035 struct cpufreq_frequency_table *table;
1040 mutex_lock(&clk_dvfs_node->vd->mutex);
1041 table = clk_dvfs_node->dvfs_table;
1042 mutex_unlock(&clk_dvfs_node->vd->mutex);
1046 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
/*
 * Replace the node's freq/volt table and re-derive the rate range, the
 * clock-rounded frequencies and the regulator-rounded voltages.  The
 * caller keeps ownership of @table; it is not copied.
 */
1048 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1053 if (IS_ERR_OR_NULL(table)){
1054 DVFS_ERR("%s:invalid table!\n", __func__);
1058 mutex_lock(&clk_dvfs_node->vd->mutex);
1059 clk_dvfs_node->dvfs_table = table;
1060 dvfs_get_rate_range(clk_dvfs_node);
1061 dvfs_table_round_clk_rate(clk_dvfs_node);
1062 dvfs_table_round_volt(clk_dvfs_node);
1063 mutex_unlock(&clk_dvfs_node->vd->mutex);
1067 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
/*
 * Enable DVFS management for a clock.  On the first enable this lazily
 * acquires the voltage-domain regulator, snapshots the current voltage,
 * rounds the freq/volt table, seeds set_freq/last_set_rate from the
 * current clock rate, and raises the rail to the reference voltage for
 * that rate if needed.  Subsequent calls only bump enable_count.
 * Also validates/activates the optional regulator-mode table.
 * Returns 0 on success, negative error otherwise (error-return lines are
 * not all visible in this view).
 */
1069 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1071 struct cpufreq_frequency_table clk_fv;
1080 DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n",
1081 __func__, __clk_get_name(clk_dvfs_node->clk));
1083 if (!clk_dvfs_node->vd) {
1084 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n",
1085 __func__, clk_dvfs_node->name);
1088 mutex_lock(&clk_dvfs_node->vd->mutex);
1089 if (clk_dvfs_node->enable_count == 0) {
/* first enable: lazily acquire the vd regulator and probe it */
1090 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1091 if (clk_dvfs_node->vd->regulator_name)
1092 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1093 if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1094 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1095 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1096 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1097 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1098 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1100 clk_dvfs_node->enable_count = 0;
1101 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n",
1102 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1103 mutex_unlock(&clk_dvfs_node->vd->mutex);
1107 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1110 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1111 __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
/* normalize the table and seed state from the current hardware rate */
1113 dvfs_table_round_clk_rate(clk_dvfs_node);
1114 dvfs_get_rate_range(clk_dvfs_node);
1115 clk_dvfs_node->freq_limit_en = 1;
1116 dvfs_table_round_volt(clk_dvfs_node);
1117 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1118 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1120 DVFS_DBG("%s: %s get freq %u!\n",
1121 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1123 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1124 if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1125 DVFS_ERR("%s: table empty\n", __func__);
1126 clk_dvfs_node->enable_count = 0;
1127 mutex_unlock(&clk_dvfs_node->vd->mutex);
/* current rate above every table entry: enable without a volt change */
1130 DVFS_WARNING("%s: clk(%s) freq table all value are smaller than default(%d), use default, just enable dvfs\n",
1131 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1132 clk_dvfs_node->enable_count++;
1133 mutex_unlock(&clk_dvfs_node->vd->mutex);
1137 clk_dvfs_node->enable_count++;
1138 clk_dvfs_node->set_volt = clk_fv.index;
1139 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1140 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1141 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1143 if (clk_dvfs_node->dvfs_nb) {
1144 // must unregister when clk disable
1145 clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
/* bring the rail up to the reference voltage for the current rate */
1148 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1149 ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1150 dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1152 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1153 clk_dvfs_node->enable_count = 0;
1154 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
1155 mutex_unlock(&clk_dvfs_node->vd->mutex);
1158 clk_dvfs_node->vd->cur_volt = volt_new;
1159 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1163 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1164 __func__, clk_dvfs_node->enable_count);
1165 clk_dvfs_node->enable_count++;
/* optional regulator-mode support: validate table then apply for rate */
1168 if (clk_dvfs_node->regu_mode_en) {
1169 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1171 DVFS_ERR("%s: clk(%s) regu_mode_table is invalid, set regu_mode_en=0!\n",
1172 __func__, clk_dvfs_node->name);
1173 clk_dvfs_node->regu_mode_en = 0;
1174 mutex_unlock(&clk_dvfs_node->vd->mutex);
1178 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1180 DVFS_ERR("%s: clk(%s) rate %dKhz get regu_mode fail\n",
1181 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1182 mutex_unlock(&clk_dvfs_node->vd->mutex);
1185 clk_dvfs_node->regu_mode = mode;
1187 dvfs_update_clk_pds_mode(clk_dvfs_node);
1190 mutex_unlock(&clk_dvfs_node->vd->mutex);
1194 EXPORT_SYMBOL(clk_enable_dvfs);
/*
 * Disable DVFS management for a clock (refcounted).  When the last
 * reference drops, recompute the voltage domain without this clock's
 * contribution, scale the rail, and unregister the clk notifier that
 * clk_enable_dvfs() registered.
 */
1196 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1203 DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n",
1204 __func__, __clk_get_name(clk_dvfs_node->clk));
1206 mutex_lock(&clk_dvfs_node->vd->mutex);
1207 if (!clk_dvfs_node->enable_count) {
1208 DVFS_WARNING("%s:clk(%s) is already closed!\n",
1209 __func__, __clk_get_name(clk_dvfs_node->clk));
1210 mutex_unlock(&clk_dvfs_node->vd->mutex);
1213 clk_dvfs_node->enable_count--;
1214 if (0 == clk_dvfs_node->enable_count) {
1215 DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1216 __func__, __clk_get_name(clk_dvfs_node->clk));
1217 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1218 dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1221 clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1222 DVFS_DBG("clk unregister nb!\n");
1226 mutex_unlock(&clk_dvfs_node->vd->mutex);
1229 EXPORT_SYMBOL(clk_disable_dvfs);
/*
 * Clamp @rate to [min_rate, max_rate] when freq limiting is enabled, and
 * additionally to temp_limit_rate when thermal limiting is on.
 * NOTE(review): the initialization of limit_rate for the in-range case is
 * not visible here — confirm it defaults to @rate.
 */
1231 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1233 unsigned long limit_rate;
1236 if (clk_dvfs_node->freq_limit_en) {
1238 if (rate < clk_dvfs_node->min_rate) {
1239 limit_rate = clk_dvfs_node->min_rate;
1240 } else if (rate > clk_dvfs_node->max_rate) {
1241 limit_rate = clk_dvfs_node->max_rate;
1243 if (temp_limit_enable) {
1244 if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1245 limit_rate = clk_dvfs_node->temp_limit_rate;
1250 DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
/*
 * Core DVFS transition for one clock (called with the vd mutex held via
 * vd_dvfs_target):
 *   1. bail out if dvfs is disabled; resync voltage if the last set failed;
 *   2. clamp the rate and round it through the clk framework;
 *   3. look up the reference voltage and aggregate across the vd;
 *   4. scaling UP: raise mode, raise voltage, then change frequency;
 *      scaling DOWN: change frequency, then lower voltage and mode.
 * On any failure the previously stored set_volt is rolled back
 * (fail_roll_back label, partially visible).
 */
1255 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1257 struct cpufreq_frequency_table clk_fv;
1258 unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1259 struct clk *clk = clk_dvfs_node->clk;
1265 if (!clk_dvfs_node->enable_count){
1266 DVFS_WARNING("%s:dvfs(%s) is disable\n",
1267 __func__, clk_dvfs_node->name);
1271 if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1272 /* It means the last time set voltage error */
1273 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1279 rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1280 new_rate = __clk_round_rate(clk, rate);
1281 old_rate = __clk_get_rate(clk);
1282 if (new_rate == old_rate)
1285 DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1286 __func__, clk_dvfs_node->name, rate, old_rate);
1288 /* find the clk corresponding voltage */
1289 ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1291 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
1292 __func__, clk_dvfs_node->name, new_rate);
/* keep the old volt so we can roll back if anything below fails */
1295 clk_volt_store = clk_dvfs_node->set_volt;
1296 clk_dvfs_node->set_volt = clk_fv.index;
1297 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1298 DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1299 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1302 /* if up the rate: raise voltage (and mode) before the frequency */
1303 if (new_rate > old_rate) {
1304 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1306 DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1307 __func__, clk_dvfs_node->name, new_rate);
1309 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1311 goto fail_roll_back;
1315 if (clk_dvfs_node->clk_dvfs_target) {
1316 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1318 ret = clk_set_rate(clk, rate);
1322 DVFS_ERR("%s:clk(%s) set rate err\n",
1323 __func__, __clk_get_name(clk));
1324 goto fail_roll_back;
1326 clk_dvfs_node->set_freq = new_rate / 1000;
1328 DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
1329 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1331 /* if down the rate: lower voltage (and mode) after the frequency */
1332 if (new_rate < old_rate) {
1333 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1337 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1339 DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1340 __func__, clk_dvfs_node->name, new_rate);
/* rollback path: restore the voltage recorded before this attempt */
1345 clk_dvfs_node->set_volt = clk_volt_store;
1350 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1352 return __clk_round_rate(clk_dvfs_node->clk, rate);
1354 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1356 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1358 return __clk_get_rate(clk_dvfs_node->clk);
1360 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1362 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1364 unsigned long last_set_rate;
1366 mutex_lock(&clk_dvfs_node->vd->mutex);
1367 last_set_rate = clk_dvfs_node->last_set_rate;
1368 mutex_unlock(&clk_dvfs_node->vd->mutex);
1370 return last_set_rate;
1372 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1375 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1377 return clk_enable(clk_dvfs_node->clk);
1379 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1381 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1383 return clk_disable(clk_dvfs_node->clk);
1385 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1387 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1391 struct dvfs_node *clk_dvfs_node;
1393 mutex_lock(&rk_dvfs_mutex);
1394 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1395 mutex_lock(&vd->mutex);
1396 list_for_each_entry(pd, &vd->pd_list, node) {
1397 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1398 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
1399 mutex_unlock(&vd->mutex);
1400 mutex_unlock(&rk_dvfs_mutex);
1401 return clk_dvfs_node;
1405 mutex_unlock(&vd->mutex);
1407 mutex_unlock(&rk_dvfs_mutex);
1411 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
/*
 * clk_put_dvfs_node() - release a node obtained with clk_get_dvfs_node().
 * NOTE(review): the body is elided in this view; no reference counting
 * is visible anywhere in this file — presumably a no-op kept for API
 * symmetry. TODO confirm against the full source.
 */
1413 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1417 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1419 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1421 return clk_prepare_enable(clk_dvfs_node->clk);
1423 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1426 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1428 clk_disable_unprepare(clk_dvfs_node->clk);
1430 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
/*
 * dvfs_clk_set_rate() - public entry point: route a rate request
 * through the voltage domain's vd_dvfs_target callback (dvfs_target()
 * for nodes registered by of_dvfs_init()) under the domain mutex.
 * Logs an error if the node has no domain or no target callback.
 */
1432 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1439 DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n",
1440 __func__, clk_dvfs_node->name, rate);
/* Dead reference-code block kept from the vendor tree. */
1442 #if 0 // judge by reference func in rk
1443 if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1444 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
/* The vd mutex serializes all rate/voltage changes in this domain and
 * also protects last_set_rate (read in dvfs_clk_get_last_set_rate()).
 * NOTE(review): last_set_rate is updated even if the target callback
 * failed — confirm this is intentional. */
1449 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1450 mutex_lock(&clk_dvfs_node->vd->mutex);
1451 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1452 clk_dvfs_node->last_set_rate = rate;
1453 mutex_unlock(&clk_dvfs_node->vd->mutex);
1455 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
1456 __func__, clk_dvfs_node->name);
1461 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
/*
 * rk_regist_vd() - add a voltage domain to the global rk_dvfs_tree.
 * Called once per "vd" device-tree child from of_dvfs_init().
 */
1464 int rk_regist_vd(struct vd_node *vd)
1470 vd->volt_time_flag=0;
1472 INIT_LIST_HEAD(&vd->pd_list);
/* rk_dvfs_mutex guards the tree itself; the domain's own state is
 * guarded by vd->mutex after registration. */
1473 mutex_lock(&rk_dvfs_mutex);
1474 list_add(&vd->node, &rk_dvfs_tree);
1475 mutex_unlock(&rk_dvfs_mutex);
1479 EXPORT_SYMBOL_GPL(rk_regist_vd);
/*
 * rk_regist_pd() - hook a power domain into its parent voltage
 * domain's pd_list under that domain's mutex.
 * NOTE(review): "vd" is set in lines elided from this view —
 * presumably vd = pd->vd; confirm against the full source.
 */
1481 int rk_regist_pd(struct pd_node *pd)
1492 INIT_LIST_HEAD(&pd->clk_list);
1493 mutex_lock(&vd->mutex);
1494 list_add(&pd->node, &vd->pd_list);
1495 mutex_unlock(&vd->mutex);
1499 EXPORT_SYMBOL_GPL(rk_regist_pd);
/*
 * rk_regist_clk() - hook a clock node into its power domain's
 * clk_list, serialized by the parent voltage domain's mutex.
 * The node's vd/pd back-pointers must already be set by the caller.
 */
1501 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1509 vd = clk_dvfs_node->vd;
1510 pd = clk_dvfs_node->pd;
1514 mutex_lock(&vd->mutex);
1515 list_add(&clk_dvfs_node->node, &pd->clk_list);
1516 mutex_unlock(&vd->mutex);
1520 EXPORT_SYMBOL_GPL(rk_regist_clk);
/*
 * rk_convert_cpufreq_table() - annotate a node's cpufreq table with
 * voltages from the OPP library.
 * For every entry, look up the exact-frequency OPP (table freq is kHz,
 * OPP freqs are Hz — hence the * 1000) and store its voltage into the
 * entry's .index field. This file repurposes .index to carry the
 * voltage; dvfs_target() reads it back via clk_fv.index.
 * Returns PTR_ERR() of the failed lookup if a frequency has no OPP.
 */
1522 static int rk_convert_cpufreq_table(struct dvfs_node *dvfs_node)
1526 struct cpufreq_frequency_table *table;
1529 table = dvfs_node->dvfs_table;
1530 dev = &dvfs_node->dev;
1532 for (i = 0; table[i].frequency!= CPUFREQ_TABLE_END; i++){
1533 opp = opp_find_freq_exact(dev, table[i].frequency * 1000, true);
1535 return PTR_ERR(opp);
1536 table[i].index = opp_get_voltage(opp);
/*
 * of_get_temp_limit_table() - parse a DT property of u32 pairs into a
 * cpufreq-table-shaped temperature-limit table.
 * Each pair is (value stored in .index, frequency in kHz); the
 * frequency is scaled to the table's unit by * 1000 and the array is
 * terminated with a CPUFREQ_TABLE_END sentinel entry.
 * Returns NULL when the property is absent or malformed (checks for
 * that are partly elided from this view).
 */
1541 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1543 struct cpufreq_frequency_table *temp_limt_table = NULL;
1544 const struct property *prop;
1548 prop = of_find_property(dev_node, propname, NULL);
1554 nr = prop->length / sizeof(u32);
/* The list must contain whole pairs; an odd count is rejected. */
1556 pr_err("%s: Invalid freq list\n", __func__);
/* nr/2 data entries plus one terminator entry. */
1560 temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1561 (nr/2 + 1), GFP_KERNEL);
1565 for (i=0; i<nr/2; i++){
1566 temp_limt_table[i].index = be32_to_cpup(val++);
1567 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
/* Terminate the table so iterators can stop at CPUFREQ_TABLE_END. */
1570 temp_limt_table[i].index = 0;
1571 temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1573 return temp_limt_table;
/*
 * of_dvfs_init() - build the whole dvfs tree from the "dvfs" device
 * node: one vd_node per child, one pd_node per grandchild, one
 * dvfs_node per clock leaf, wiring OPP tables and temperature limits
 * along the way.
 * NOTE(review): error-handling lines are elided from this view; the
 * comments below describe only the visible statements.
 */
1577 int of_dvfs_init(void)
1581 struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
1582 struct dvfs_node *dvfs_node;
1587 DVFS_DBG("%s\n", __func__);
1589 dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
1590 if (IS_ERR_OR_NULL(dvfs_dev_node)) {
1591 DVFS_ERR("%s get dvfs dev node err\n", __func__);
1592 return PTR_ERR(dvfs_dev_node);
/* Optional global knobs; file-scope defaults (80C, enabled) apply
 * when the properties are absent. */
1595 val = of_get_property(dvfs_dev_node, "target-temp", NULL);
1597 target_temp = be32_to_cpup(val);
1600 val = of_get_property(dvfs_dev_node, "temp-limit-enable", NULL);
1602 temp_limit_enable = be32_to_cpup(val);
/* Level 1: each child of "dvfs" is a voltage domain. */
1605 for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
1606 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
1610 mutex_init(&vd->mutex);
1611 vd->name = vd_dev_node->name;
1612 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
1614 DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n",
1615 __func__, vd_dev_node->name, ret);
1620 vd->suspend_volt = 0;
/* Start pessimistic: force dvfs_target() to reset the voltage on the
 * first transition rather than trusting an unknown boot voltage. */
1622 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1623 vd->vd_dvfs_target = dvfs_target;
1624 ret = rk_regist_vd(vd);
1626 DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
1631 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n",
1632 __func__, vd->name, vd->regulator_name, vd->suspend_volt);
/* Level 2: each child of a voltage domain is a power domain. */
1634 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {
1635 pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
1640 pd->name = pd_dev_node->name;
1642 ret = rk_regist_pd(pd);
1644 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
1648 DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
1649 __func__, pd->name, vd->name);
/* Level 3: each child of a power domain is a clock node. */
1650 for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
1651 if (!of_device_is_available(clk_dev_node))
1654 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
1658 dvfs_node->name = clk_dev_node->name;
/* Optional per-clock regulator-mode table. */
1662 val = of_get_property(clk_dev_node, "regu-mode-en", NULL);
1664 dvfs_node->regu_mode_en = be32_to_cpup(val);
1665 if (dvfs_node->regu_mode_en)
1666 dvfs_node->regu_mode_table = of_get_regu_mode_table(clk_dev_node);
1668 dvfs_node->regu_mode_table = NULL;
/* Optional thermal-throttling tables, gated on the global switch. */
1670 if (temp_limit_enable) {
1671 val = of_get_property(clk_dev_node, "temp-channel", NULL);
1673 dvfs_node->temp_channel = be32_to_cpup(val);
1675 dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
1676 dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
/* -1 == "no thermal cap currently applied". */
1678 dvfs_node->temp_limit_rate = -1;
/* Bind OPPs declared under this DT node and flatten them into a
 * cpufreq table, then stamp voltages into it (see
 * rk_convert_cpufreq_table()). */
1679 dvfs_node->dev.of_node = clk_dev_node;
1680 ret = of_init_opp_table(&dvfs_node->dev);
1682 DVFS_ERR("%s:clk(%s) get opp table err:%d\n", __func__, dvfs_node->name, ret);
1687 ret = opp_init_cpufreq_table(&dvfs_node->dev, &dvfs_node->dvfs_table);
1689 DVFS_ERR("%s:clk(%s) get cpufreq table err:%d\n", __func__, dvfs_node->name, ret);
1693 ret = rk_convert_cpufreq_table(dvfs_node);
/* The DT node name doubles as the clk lookup name. */
1699 clk = clk_get(NULL, clk_dev_node->name);
1701 DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
1707 dvfs_node->clk = clk;
1708 ret = rk_regist_clk(dvfs_node);
1710 DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
1714 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n",
1715 __func__, clk_dev_node->name, pd->name);
1723 /*********************************************************************************/
1725 * dump_dbg_map() : Dump all information about the dvfs tree, for debugging
/*
 * dump_dbg_map() - print the full dvfs tree (voltage domains, power
 * domains, clocks, their tables and limits) to the kernel log.
 * Backs the /sys/dvfs/dvfs_tree "show" handler; note the output goes
 * to printk, not into @buf, despite the sysfs return convention.
 */
1727 static int dump_dbg_map(char *buf)
1732 struct dvfs_node *clk_dvfs_node;
/* Same lock order as clk_get_dvfs_node(): tree mutex, then each
 * domain's mutex while walking its lists. */
1735 mutex_lock(&rk_dvfs_mutex);
1736 printk( "-------------DVFS TREE-----------\n\n\n");
1737 printk( "DVFS TREE:\n");
1739 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1740 mutex_lock(&vd->mutex);
1741 printk( "|\n|- voltage domain:%s\n", vd->name);
1742 printk( "|- current voltage:%d\n", vd->cur_volt);
1743 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
1745 list_for_each_entry(pd, &vd->pd_list, node) {
1746 printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
1747 pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
1748 dvfs_regu_mode_to_string(pd->regu_mode));
1750 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1751 printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
1752 " enable_dvfs = %s\n",
1753 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
1754 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
1755 printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
1756 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
1757 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
1758 clk_dvfs_node->last_set_rate/1000);
/* Per-clock freq/volt table (.index carries the voltage here). */
1759 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1760 printk( "| | | |- freq = %d, volt = %d\n",
1761 clk_dvfs_node->dvfs_table[i].frequency,
1762 clk_dvfs_node->dvfs_table[i].index);
1765 printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
1766 " regu_mode_en = %d\n",
1767 clk_dvfs_node->name, clk_dvfs_node->set_freq,
1768 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
1769 clk_dvfs_node->regu_mode_en);
/* Optional regulator-mode table (.index carries the mode here). */
1770 if (clk_dvfs_node->regu_mode_table) {
1771 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1772 printk( "| | | |- freq = %d, regu_mode = %s\n",
1773 clk_dvfs_node->regu_mode_table[i].frequency/1000,
1774 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
1779 mutex_unlock(&vd->mutex);
1782 printk( "-------------DVFS TREE END------------\n");
1783 mutex_unlock(&rk_dvfs_mutex);
1788 /*********************************************************************************/
/* kobject backing the "dvfs" sysfs directory created in dvfs_init(). */
1789 static struct kobject *dvfs_kobj;
/* Local attribute wrapper pairing a sysfs attribute with its
 * show/store handlers (see dvfs_attrs[] below). */
1790 struct dvfs_attribute {
1791 struct attribute attr;
1792 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
1794 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
1795 const char *buf, size_t n);
/* Sysfs "store" handler for the dvfs_tree attribute.
 * NOTE(review): the body is elided in this view — presumably it just
 * accepts and discards the input (returns n); confirm against the
 * full source. */
1798 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
1799 const char *buf, size_t n)
/* Sysfs "show" handler for the dvfs_tree attribute: dumps the whole
 * dvfs tree via dump_dbg_map() (which prints to the kernel log). */
1803 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
1806 return dump_dbg_map(buf);
/* Attribute table registered one-by-one in dvfs_init(). */
1810 static struct dvfs_attribute dvfs_attrs[] = {
1811 /* node_name	permission	show_func	store_func */
1812 //#ifdef CONFIG_RK_CLOCK_PROC
1813 __ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR, dvfs_tree_show, dvfs_tree_store),
/*
 * dvfs_init() - late_initcall: create the "dvfs" sysfs directory and
 * its attributes, start the CPU temperature-limit worker, and hook the
 * vdd_gpu regulator to the GPU power-domain / framebuffer / reboot
 * notifiers declared near the top of this file.
 */
1817 static int __init dvfs_init(void)
1821 dvfs_kobj = kobject_create_and_add("dvfs", NULL);
1824 for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
1825 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
1827 DVFS_ERR("create index %d error\n", i);
/* Thermal throttling: seed the CPU node's cap at max_rate and kick
 * the periodic temp-limit work (defined elsewhere in this file). */
1832 if (temp_limit_enable) {
1833 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
1834 if (!clk_cpu_dvfs_node){
1838 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
/* Single-threaded, high-prio, freezable workqueue.
 * NOTE(review): WQ_NON_REENTRANT is a no-op/removed in later kernels;
 * harmless in this 3.x vendor tree. */
1839 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
1840 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
/* Only wire the GPU power notifiers if a vdd_gpu supply exists. */
1843 vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
1844 if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
1845 struct clk *clk = clk_get(NULL, "pd_gpu");
1848 rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
1850 fb_register_client(&early_suspend_notifier);
1851 register_reboot_notifier(&vdd_gpu_reboot_notifier);
1857 late_initcall(dvfs_init);