1 /* arch/arm/mach-rk30/rk30_dvfs.c
3 * Copyright (C) 2012 ROCKCHIP, Inc.
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
/* Temperature read from the SoC thermal ADC; chn selects the sensor channel. */
31 extern int rockchip_tsadc_get_temp(int chn);
33 #define MHz (1000 * 1000)
/* Global DVFS state: list of voltage domains (vd_node) and its guarding lock. */
34 static LIST_HEAD(rk_dvfs_tree);
35 static DEFINE_MUTEX(rk_dvfs_mutex);
/* Workqueue used by the periodic temperature-limit work. */
36 static struct workqueue_struct *dvfs_wq;
37 static struct dvfs_node *clk_cpu_dvfs_node;
/* Thermal throttling target, degrees C; throttling active when temp_limit_enable. */
38 static unsigned int target_temp = 80;
39 static int temp_limit_enable;
/* GPU power-domain / early-suspend bookkeeping used by the vdd_gpu notifiers. */
41 static int pd_gpu_off, early_suspend;
42 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
43 struct regulator *vdd_gpu_regulator;
/*
 * Reboot notifier: make sure vdd_gpu is enabled before reboot so the GPU
 * rail is in a known-on state.  NOTE(review): this is an elided listing --
 * interior lines (e.g. the return statement) are missing from view.
 */
45 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
46 unsigned long event, void *ptr)
50 DVFS_DBG("%s: enable vdd_gpu\n", __func__);
/* Enable-if-disabled under the mutex that serializes all vdd_gpu switching. */
51 mutex_lock(&switch_vdd_gpu_mutex);
52 if (!regulator_is_enabled(vdd_gpu_regulator))
53 ret = regulator_enable(vdd_gpu_regulator);
54 mutex_unlock(&switch_vdd_gpu_mutex);
/* Notifier block instance registered on the reboot notifier chain. */
59 static struct notifier_block vdd_gpu_reboot_notifier = {
60 .notifier_call = vdd_gpu_reboot_notifier_event,
/*
 * GPU power-domain clock notifier: enables vdd_gpu before the pd is
 * prepared and disables it after the pd is unprepared, so the rail only
 * stays up while the GPU domain is in use.  NOTE(review): elided listing --
 * the switch() opener, break statements and return are not visible here.
 */
63 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
64 unsigned long event, void *ptr)
69 case RK_CLK_PD_PREPARE:
70 mutex_lock(&switch_vdd_gpu_mutex);
73 if (!regulator_is_enabled(vdd_gpu_regulator))
74 ret = regulator_enable(vdd_gpu_regulator);
76 mutex_unlock(&switch_vdd_gpu_mutex);
78 case RK_CLK_PD_UNPREPARE:
79 mutex_lock(&switch_vdd_gpu_mutex);
82 if (regulator_is_enabled(vdd_gpu_regulator))
83 ret = regulator_disable(vdd_gpu_regulator);
85 mutex_unlock(&switch_vdd_gpu_mutex);
/* Notifier block instance for the GPU power-domain clock events. */
94 static struct notifier_block clk_pd_gpu_notifier = {
95 .notifier_call = clk_pd_gpu_notifier_call,
/*
 * Framebuffer blank notifier: power vdd_gpu up on early unblank and down
 * on blank/powerdown, tracking display on/off for GPU power saving.
 * NOTE(review): elided listing -- break/return lines are missing.
 */
99 static int early_suspend_notifier_call(struct notifier_block *self,
100 unsigned long action, void *data)
102 struct fb_event *event = data;
/* fb_event carries a pointer to the blank mode as its data payload. */
103 int blank_mode = *((int *)event->data);
106 mutex_lock(&switch_vdd_gpu_mutex);
107 if (action == FB_EARLY_EVENT_BLANK) {
108 switch (blank_mode) {
109 case FB_BLANK_UNBLANK:
112 if (!regulator_is_enabled(vdd_gpu_regulator))
113 ret = regulator_enable(
120 } else if (action == FB_EVENT_BLANK) {
121 switch (blank_mode) {
122 case FB_BLANK_POWERDOWN:
125 if (regulator_is_enabled(vdd_gpu_regulator))
126 ret = regulator_disable(
135 mutex_unlock(&switch_vdd_gpu_mutex);
/* Notifier block instance for framebuffer blank events. */
140 static struct notifier_block early_suspend_notifier = {
141 .notifier_call = early_suspend_notifier_call,
/*
 * DVFS-local regulator mode encoding (ordered standby < idle < normal <
 * fast) used in device-tree tables; converted to/from the kernel's
 * REGULATOR_MODE_* constants by the helpers below.
 */
144 #define DVFS_REGULATOR_MODE_STANDBY 1
145 #define DVFS_REGULATOR_MODE_IDLE 2
146 #define DVFS_REGULATOR_MODE_NORMAL 3
147 #define DVFS_REGULATOR_MODE_FAST 4
/* Human-readable name for a DVFS mode (for logs).  Elided listing: the
 * return-string lines are not visible here. */
149 static const char* dvfs_regu_mode_to_string(unsigned int mode)
152 case DVFS_REGULATOR_MODE_FAST:
154 case DVFS_REGULATOR_MODE_NORMAL:
156 case DVFS_REGULATOR_MODE_IDLE:
158 case DVFS_REGULATOR_MODE_STANDBY:
/* Map a DVFS mode to the kernel REGULATOR_MODE_* value.  NOTE(review):
 * default/error branch not visible in this elided listing. */
165 static int dvfs_regu_mode_convert(unsigned int mode)
168 case DVFS_REGULATOR_MODE_FAST:
169 return REGULATOR_MODE_FAST;
170 case DVFS_REGULATOR_MODE_NORMAL:
171 return REGULATOR_MODE_NORMAL;
172 case DVFS_REGULATOR_MODE_IDLE:
173 return REGULATOR_MODE_IDLE;
174 case DVFS_REGULATOR_MODE_STANDBY:
175 return REGULATOR_MODE_STANDBY;
/* Inverse mapping: kernel REGULATOR_MODE_* back to the DVFS encoding. */
181 static int dvfs_regu_mode_deconvert(unsigned int mode)
184 case REGULATOR_MODE_FAST:
185 return DVFS_REGULATOR_MODE_FAST;
186 case REGULATOR_MODE_NORMAL:
187 return DVFS_REGULATOR_MODE_NORMAL;
188 case REGULATOR_MODE_IDLE:
189 return DVFS_REGULATOR_MODE_IDLE;
190 case REGULATOR_MODE_STANDBY:
191 return DVFS_REGULATOR_MODE_STANDBY;
/*
 * Parse the "regu-mode-table" device-tree property into a cpufreq-style
 * table of (frequency kHz, mode) pairs terminated by CPUFREQ_TABLE_END.
 * The DT property is a flat u32 array of freq/mode pairs; the last pair's
 * frequency must be 0 (it becomes the table terminator).  Returns the
 * allocated table (caller frees) or an ERR_PTR/NULL on failure --
 * NOTE(review): some error-return lines are elided from this listing.
 */
197 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
199 struct cpufreq_frequency_table *regu_mode_table = NULL;
200 const struct property *prop;
204 prop = of_find_property(dev_node, "regu-mode-table", NULL);
/* Each table entry consumes two u32 cells, so nr must be even. */
210 nr = prop->length / sizeof(u32);
212 pr_err("%s: Invalid freq list\n", __func__);
216 regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
217 (nr/2+1), GFP_KERNEL);
218 if (!regu_mode_table) {
219 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
220 return ERR_PTR(-ENOMEM);
/* DT cells are big-endian; frequency is stored in kHz -> scaled by 1000. */
225 for (i=0; i<nr/2; i++){
226 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
227 regu_mode_table[i].index = be32_to_cpup(val++);
/* The DT table must end with a 0-frequency sentinel entry. */
230 if (regu_mode_table[i-1].frequency != 0) {
231 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
232 kfree(regu_mode_table);
236 regu_mode_table[i].index = 0;
237 regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
239 return regu_mode_table;
/*
 * Validate every mode in the node's regu_mode_table against what the vd's
 * regulator actually supports.  Unsupported mode -> the whole table is
 * dropped (freed and NULLed); a supported-but-inexact mode is rounded to
 * the valid value in place.  NOTE(review): elided listing -- early-return
 * lines after the guard checks are missing here.
 */
242 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
245 int mode, convert_mode, valid_mode;
250 if (!clk_dvfs_node->regu_mode_table)
253 if (!clk_dvfs_node->vd)
256 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
259 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
260 mode = clk_dvfs_node->regu_mode_table[i].index;
261 convert_mode = dvfs_regu_mode_convert(mode);
263 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
/* Unsupported mode: discard the table entirely rather than run with it. */
266 DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
268 kfree(clk_dvfs_node->regu_mode_table);
269 clk_dvfs_node->regu_mode_table = NULL;
/* Round-trip through convert/deconvert to normalize the stored mode. */
273 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
274 if (valid_mode != mode) {
275 DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
276 __func__, mode, valid_mode);
277 clk_dvfs_node->regu_mode_table[i].index = valid_mode;
/*
 * Look up the regulator mode for a given clock rate: the table is scanned
 * until an entry whose frequency is <= rate matches (entries appear to be
 * ordered so the first hit wins -- TODO confirm ordering against the DT).
 * *mode receives the result.  Elided listing: the return lines are missing.
 */
285 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
286 unsigned long rate, unsigned int *mode)
291 if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
294 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
295 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
296 *mode = clk_dvfs_node->regu_mode_table[i].index;
/*
 * Mode aggregation helpers: a power domain's mode is the max mode over its
 * enabled clocks, and a voltage domain's mode is the max over its pds.
 * (Higher DVFS mode value = faster regulator mode.)
 */
/* Fast path: if this clock already demands >= the pd's mode, use it;
 * otherwise rescan every mode-enabled clock on the pd for the max. */
304 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
306 unsigned int mode_max = 0;
309 if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
310 return clk_dvfs_node->regu_mode;
313 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
314 if (clk_dvfs_node->regu_mode_en)
315 mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
/* Recompute and store the mode of the pd owning this clock. */
321 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
328 pd = clk_dvfs_node->pd;
332 pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
/* Max mode across all pds of a voltage domain. */
335 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
337 unsigned int mode_max_vd = 0;
343 list_for_each_entry(pd, &vd->pd_list, node) {
344 mode_max_vd = max(mode_max_vd, pd->regu_mode);
/* Propagate this clock's mode up through its pd, then resolve the vd mode. */
350 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
355 dvfs_update_clk_pds_mode(clk_dvfs_node);
357 return dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
/*
 * Apply a DVFS mode to the voltage domain's regulator: convert to the
 * kernel REGULATOR_MODE_* value, call set_mode, and cache the new mode in
 * vd->regu_mode on success.  Elided listing: error-return lines missing.
 */
360 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
366 if (IS_ERR_OR_NULL(vd)) {
367 DVFS_ERR("%s: vd_node error\n", __func__);
371 DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
373 convert_mode = dvfs_regu_mode_convert(mode);
374 if (convert_mode < 0) {
375 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
379 if (!IS_ERR_OR_NULL(vd->regulator)) {
380 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
382 DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
383 vd->regulator_name, mode, vd->regu_mode);
387 DVFS_ERR("%s: invalid regulator\n", __func__);
/* Only reached on success: remember the mode we just programmed. */
391 vd->regu_mode = mode;
/*
 * Set the regulator mode appropriate for `rate` on this clock's voltage
 * domain: look up the clock's table mode, record it, aggregate across the
 * vd, and program the regulator.  No-op when regu_mode_en is clear.
 */
396 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
405 if (!clk_dvfs_node->regu_mode_en)
408 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
410 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
411 __func__, clk_dvfs_node->name, rate);
414 clk_dvfs_node->regu_mode = mode;
/* The vd-wide mode may exceed this clock's own requirement. */
416 mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
420 ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
/*
 * Busy-wait for the regulator to ramp when raising voltage.  Uses the
 * regulator-reported settle time when available, otherwise falls back to
 * an estimate of (delta_uV >> 9) microseconds (~2mV/us slew assumption --
 * TODO confirm against the board's PMIC).  Down-steps return immediately.
 */
425 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
429 if(new_volt <= old_volt)
/* volt_time_flag > 0 means the regulator supports set_voltage_time. */
431 if(vd->volt_time_flag > 0)
432 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
435 if(u_time < 0) {// regulator is not suported time,useing default time
436 DVFS_DBG("%s:vd %s is not suported getting delay time,so we use default\n",
438 u_time = ((new_volt) - (old_volt)) >> 9;
441 DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n",
442 __func__, vd->name, old_volt, new_volt, u_time);
/* Split long waits into mdelay + udelay; warn if the ramp exceeds 1ms. */
444 if (u_time >= 1000) {
445 mdelay(u_time / 1000);
446 udelay(u_time % 1000);
447 DVFS_WARNING("%s: regulator set vol delay is larger 1ms,old is %d,new is %d\n",
448 __func__, old_volt, new_volt);
/*
 * Set the regulator voltage and, if the set call reports failure, read the
 * voltage back: if the readback matches the request the change actually
 * took effect despite the error.  NOTE(review): both min_uV and max_uV are
 * passed as max_uV to the underlying set call; the min_uV parameter is
 * unused in the visible lines -- confirm against the full source.
 */
454 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
456 int ret = 0, read_back = 0;
458 ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
460 DVFS_ERR("%s: now read back to check voltage\n", __func__);
462 /* read back to judge if it is already effect */
464 read_back = dvfs_regulator_get_voltage(regulator);
465 if (read_back == max_uV) {
466 DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
469 DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
/*
 * Program the voltage domain to volt_new in one step (no intermediate
 * steps), wait for ramp-up if raising, and update cur_volt/volt_set_flag.
 * On failure the flag is left at DVFS_SET_VOLT_FAILURE so a later call can
 * resynchronize via dvfs_reset_volt().
 */
476 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
480 DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
482 if (IS_ERR_OR_NULL(vd_clk)) {
483 DVFS_ERR("%s: vd_node error\n", __func__);
487 if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
488 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
/* Wait for the rail to actually reach the new level before returning. */
489 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
491 vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
492 DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
493 __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
498 DVFS_ERR("%s: invalid regulator\n", __func__);
/* Success path: record the voltage we believe the rail is now at. */
502 vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
503 vd_clk->cur_volt = volt_new;
/*
 * Recovery helper after a failed voltage set: read the regulator's actual
 * output and adopt it as cur_volt so subsequent scaling starts from truth.
 * Returns the reloaded voltage, or an error when the readback itself fails
 * (elided listing: the error-return line is not visible).
 */
509 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
511 int flag_set_volt_correct = 0;
512 if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
513 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
515 DVFS_ERR("%s: invalid regulator\n", __func__);
518 if (flag_set_volt_correct <= 0) {
519 DVFS_ERR("%s (vd:%s), try to reload volt ,by it is error again(%d)!!! stop scaling\n",
520 __func__, dvfs_vd->name, flag_set_volt_correct);
523 dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
524 DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
525 __func__, dvfs_vd->name, flag_set_volt_correct);
527 /* Reset vd's voltage */
528 dvfs_vd->cur_volt = flag_set_volt_correct;
530 return dvfs_vd->cur_volt;
534 // for clk enable case to get vd regulator info
/* Snapshot the regulator's current voltage into the vd; a non-positive
 * reading marks the vd as FAILURE so scaling resynchronizes first. */
535 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
537 vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
538 if(vd->cur_volt <= 0){
539 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
541 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
/*
 * Cache the regulator's selectable voltage steps into vd->volt_list,
 * capped at VD_VOL_LIST_CNT entries; non-positive list entries are
 * skipped (the skip branch is elided from this listing).
 */
544 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
546 unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
547 int n = 0, sel_volt = 0;
549 if(selector > VD_VOL_LIST_CNT)
550 selector = VD_VOL_LIST_CNT;
552 for (i = 0; i < selector; i++) {
553 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
555 //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
556 // __func__, vd->name, i, sel_volt);
559 vd->volt_list[n++] = sel_volt;
560 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n",
561 __func__, vd->name, i, n, sel_volt);
/*
 * Round a requested voltage to a value the regulator can actually supply,
 * scanning the cached volt_list.  _max rounds up (>= volt), _min rounds
 * down (<= volt) -- the comparison/return lines are elided from this
 * listing, so the exact rounding direction is inferred from the names and
 * VD_LIST_RELATION_L/H usage; confirm against the full source.
 */
568 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
573 for (i = 0; i < vd->n_voltages; i++) {
574 sel_volt = vd->volt_list[i];
576 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
587 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
592 for (i = 0; i < vd->n_voltages; i++) {
593 sel_volt = vd->volt_list[i];
595 DVFS_WARNING("%s: selector=%u, but volt <=0\n",
/* First list entry above volt -> previous entry is the round-down result. */
601 return vd->volt_list[i-1];
/* Dispatcher: VD_LIST_RELATION_L selects round-down, otherwise round-up. */
611 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
615 if(flags == VD_LIST_RELATION_L)
616 return vd_regulator_round_volt_min(vd, volt);
618 return vd_regulator_round_volt_max(vd, volt);
/*
 * Walk the node's freq/volt table and replace each voltage (stored in
 * .index) with the nearest regulator-supported value, rounding up
 * (VD_LIST_RELATION_H) so the supplied voltage is never below the table's
 * requirement.  Requires a valid dvfs_table, vd and regulator.
 */
621 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
625 if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd ||
626 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
629 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
631 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
634 DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
635 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
638 DVFS_DBG("clk %s:round_volt %d to %d\n",
639 clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
641 clk_dvfs_node->dvfs_table[i].index=test_volt;
/*
 * Probe whether the vd's regulator reports voltage settle time: queries
 * the time for a 200mV up-step and caches the result in volt_time_flag
 * (<0 means unsupported, so dvfs_volt_up_delay falls back to an estimate).
 */
645 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
647 if(vd->volt_time_flag <= 0){// check regulator support get uping vol timer
648 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
649 if(vd->volt_time_flag < 0){
650 DVFS_DBG("%s,vd %s volt_time is no support\n",
654 DVFS_DBG("%s,vd %s volt_time is support,up 200mv need delay %d us\n",
655 __func__, vd->name, vd->volt_time_flag);
/*
 * Probe whether the regulator supports mode switching: read the current
 * mode, and if it is one of the four known modes, verify set_mode works
 * by writing it back; mode_flag is reset to 0 on failure so the probe is
 * retried on the next call.
 */
660 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
662 //REGULATOR_MODE_FAST
663 if(vd->mode_flag <= 0){// check regulator support get uping vol timer{
664 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
665 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
666 || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
668 if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
669 vd->mode_flag = 0;// check again
672 if(vd->mode_flag > 0){
673 DVFS_DBG("%s,vd %s mode(now is %d) support\n",
674 __func__, vd->name, vd->mode_flag);
677 DVFS_DBG("%s,vd %s mode is not support now check\n",
/*
 * Find a voltage domain's regulator handle by regulator name, scanning the
 * global DVFS tree under rk_dvfs_mutex.  Returns the regulator on a name
 * match; the not-found return is elided from this listing (presumably
 * NULL -- confirm against the full source).
 */
684 struct regulator *dvfs_get_regulator(char *regulator_name)
688 mutex_lock(&rk_dvfs_mutex);
689 list_for_each_entry(vd, &rk_dvfs_tree, node) {
690 if (strcmp(regulator_name, vd->regulator_name) == 0) {
691 mutex_unlock(&rk_dvfs_mutex);
692 return vd->regulator;
695 mutex_unlock(&rk_dvfs_mutex);
/*
 * Derive min_rate/max_rate (in Hz) from the node's freq table.  Table
 * frequencies are kHz; the /1000*1000*1000 converts to Hz on MHz
 * boundaries.  The min-rate assignment's guard (likely i==0 or first
 * iteration) is elided from this listing.
 */
699 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
701 struct cpufreq_frequency_table *table;
707 clk_dvfs_node->min_rate = 0;
708 clk_dvfs_node->max_rate = 0;
710 table = clk_dvfs_node->dvfs_table;
/* max_rate ends up as the last entry's rate; loop runs to the sentinel. */
711 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
712 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
714 clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
717 DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
718 __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
/*
 * Round every table frequency to what the clock framework can actually
 * produce.  DDR entries encode flags in the low 3 decimal digits of the
 * kHz value ("real rate + flags"), so the flags are split off, the real
 * rate rounded via __clk_round_rate (then up to a whole MHz), and the
 * flags re-attached.
 */
723 static void dvfs_table_round_clk_rate(struct dvfs_node *clk_dvfs_node)
725 int i, rate, temp_rate, flags;
727 if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
730 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
731 //ddr rate = real rate+flags
732 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
733 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
734 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
736 DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
737 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
741 /* Set rate unit as MHZ */
742 if (temp_rate % MHz != 0)
743 temp_rate = (temp_rate / MHz + 1) * MHz;
/* Back to kHz, re-attach the DDR flags in the low digits. */
745 temp_rate = (temp_rate / 1000) + flags;
747 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
748 clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
750 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;
/*
 * Find the table entry for a requested rate (kHz): the first entry whose
 * frequency is >= rate_khz wins, and its frequency/voltage pair is copied
 * into *clk_fv.  If no entry is large enough, clk_fv->frequency is zeroed
 * (out-of-bounds).  Elided listing: the return statements are missing.
 */
754 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
755 struct cpufreq_frequency_table *clk_fv)
759 if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
763 clk_fv->frequency = rate_khz;
766 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
767 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
768 clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
769 clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
770 //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
771 //clk_fv->frequency, clk_fv->index);
775 clk_fv->frequency = 0;
777 //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
/*
 * Voltage aggregation mirrors the mode aggregation above: a pd's voltage
 * is the max set_volt over its enabled clocks, and a vd's voltage is the
 * max over its pds.
 */
/* Fast path if this clock already demands >= the pd's current voltage. */
781 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
785 if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
786 return clk_dvfs_node->set_volt;
789 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
790 if (clk_dvfs_node->enable_count)
791 volt_max = max(volt_max, clk_dvfs_node->set_volt);
/* Recompute and store the voltage of the pd owning this clock. */
796 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
803 pd = clk_dvfs_node->pd;
807 pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
/* Max voltage across all pds of the voltage domain. */
810 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
818 list_for_each_entry(pd, &vd->pd_list, node) {
819 volt_max_vd = max(volt_max_vd, pd->cur_volt);
/* Propagate this clock's voltage through its pd, then resolve the vd's. */
825 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
830 dvfs_update_clk_pds_volt(clk_dvfs_node);
831 return dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
/*
 * Periodic (100ms) thermal work, per-clock variant: for every dvfs node
 * with a temp_limit_table, refresh its temperature reading and re-run the
 * vd's dvfs target at the last requested rate so throttling is re-applied.
 * Reschedules itself first, then walks vd -> pd -> clk under both the
 * global tree lock and each vd's lock.  (A second, CPU-centric variant of
 * this function appears later in the file -- presumably they are
 * alternatives selected by #ifdef in the full source.)
 */
835 static void dvfs_temp_limit_work_func(struct work_struct *work)
837 unsigned long delay = HZ / 10; // 100ms
840 struct dvfs_node *clk_dvfs_node;
842 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
844 mutex_lock(&rk_dvfs_mutex);
845 list_for_each_entry(vd, &rk_dvfs_tree, node) {
846 mutex_lock(&vd->mutex);
847 list_for_each_entry(pd, &vd->pd_list, node) {
848 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
849 if (clk_dvfs_node->temp_limit_table) {
850 clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
851 clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
855 mutex_unlock(&vd->mutex);
857 mutex_unlock(&rk_dvfs_mutex);
/*
 * Fallback throttling for SoCs without a usable thermal sensor (RK3126/
 * RK3128 only): estimate CPU load from idle-time deltas, pick a limit
 * table indexed by the number of busy CPUs, and derive a frequency cap
 * from target_temp.  If the cap changed, re-run dvfs_clk_set_rate at the
 * last requested rate so the new cap takes effect.
 */
861 static void dvfs_virt_temp_limit_work_func(void)
863 const struct cpufreq_frequency_table *limits_table = NULL;
864 unsigned int new_temp_limit_rate = -1;
865 unsigned int nr_cpus = num_online_cpus();
869 if (!soc_is_rk3126() && !soc_is_rk3128())
/* Performance mode bypasses the load estimate (branch bodies elided). */
872 if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
874 } else if (in_perf) {
/* Static state persists across invocations for delta computation. */
877 static u64 last_time_in_idle;
878 static u64 last_time_in_idle_timestamp;
879 u64 time_in_idle = 0, now;
882 unsigned cpu, busy_cpus;
884 for_each_online_cpu(cpu) {
885 time_in_idle += get_cpu_idle_time_us(cpu, &now);
887 delta_time = now - last_time_in_idle_timestamp;
888 delta_idle = time_in_idle - last_time_in_idle;
889 last_time_in_idle = time_in_idle;
890 last_time_in_idle_timestamp = now;
/* Bias idle up by 6.25% so borderline-busy CPUs count as idle. */
891 delta_idle += delta_time >> 4; /* +6.25% */
/* Classify busy_cpus by how much aggregate idle time was observed
 * (the busy_cpus assignments between the comparisons are elided). */
892 if (delta_idle > (nr_cpus - 1)
893 * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
895 else if (delta_idle > (nr_cpus - 2) * delta_time)
897 else if (delta_idle > (nr_cpus - 3) * delta_time)
902 limits_table = clk_cpu_dvfs_node->virt_temp_limit_table[busy_cpus-1];
903 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
904 delta_time, delta_idle, nr_cpus, busy_cpus);
/* Pick the highest cap whose temperature threshold is <= target_temp. */
908 new_temp_limit_rate = limits_table[0].frequency;
909 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
910 if (target_temp >= limits_table[i].index)
911 new_temp_limit_rate = limits_table[i].frequency;
915 if (clk_cpu_dvfs_node->temp_limit_rate != new_temp_limit_rate) {
916 clk_cpu_dvfs_node->temp_limit_rate = new_temp_limit_rate;
917 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
918 DVFS_DBG("temp_limit_rate:%d\n", (int)clk_cpu_dvfs_node->temp_limit_rate);
/*
 * Periodic (100ms) thermal work, CPU-centric variant.  Reads the SoC
 * temperature (channel 1); if the sensor is invalid, falls back to the
 * virtual load-based limiter.  In PERFORMANCE policy the cap comes
 * straight from per_temp_limit_table; in NORMAL policy the cap is stepped
 * down while above target_temp and rising, and stepped back up toward
 * max_rate while below target_temp, using nor_temp_limit_table to size
 * the step from the temperature error.
 */
922 static void dvfs_temp_limit_work_func(struct work_struct *work)
924 int temp=0, delta_temp=0;
925 unsigned long delay = HZ/10;
926 unsigned long arm_rate_step=0;
/* Previous reading, kept across invocations to detect rising/falling. */
927 static int old_temp=0;
930 queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
932 temp = rockchip_tsadc_get_temp(1);
934 if (temp == INVALID_TEMP)
935 return dvfs_virt_temp_limit_work_func();
/* |temp - old_temp| — the debounce check using it is elided here. */
938 delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
942 if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
943 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
/* Cap = frequency of the last table row whose temp threshold was crossed. */
947 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
948 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
949 if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
950 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
953 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
954 } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
955 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
/* Over target and still rising: step the cap down by a table-sized step. */
959 if (temp > target_temp) {
960 if (temp > old_temp) {
961 delta_temp = temp - target_temp;
962 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
963 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
964 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
967 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
968 clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
969 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
/* Under target with a cap in force: step the cap back up, clamped to max. */
973 if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
974 delta_temp = target_temp - temp;
975 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
976 if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
977 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
982 clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
983 if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
984 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
986 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
992 DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
/* Delayed work item driving the periodic thermal limiter. */
996 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
/*
 * Install a [min_rate, max_rate] frequency clamp on a dvfs clock.  The
 * node's range is first reset from its table, the requested bounds are
 * applied only when they fall inside the table range, and the clock is
 * immediately re-targeted (at last_set_rate, or the current hardware rate
 * if nothing was set yet) so the clamp takes effect at once.
 */
999 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1001 u32 rate = 0, ret = 0;
1003 if (!clk_dvfs_node || (min_rate > max_rate))
1006 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1007 mutex_lock(&clk_dvfs_node->vd->mutex);
1009 /* To reset clk_dvfs_node->min_rate/max_rate */
1010 dvfs_get_rate_range(clk_dvfs_node);
1011 clk_dvfs_node->freq_limit_en = 1;
1013 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1014 clk_dvfs_node->min_rate = min_rate;
1017 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1018 clk_dvfs_node->max_rate = max_rate;
1021 if (clk_dvfs_node->last_set_rate == 0)
1022 rate = __clk_get_rate(clk_dvfs_node->clk);
1024 rate = clk_dvfs_node->last_set_rate;
1025 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1027 mutex_unlock(&clk_dvfs_node->vd->mutex);
1031 DVFS_DBG("%s:clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
1032 __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate,
1033 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1037 EXPORT_SYMBOL(dvfs_clk_enable_limit);
/*
 * Remove the frequency clamp: restore min/max from the table, clear
 * freq_limit_en, and re-target the clock at last_set_rate so it may rise
 * back to its unclamped rate.
 */
1039 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1046 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1047 mutex_lock(&clk_dvfs_node->vd->mutex);
1049 /* To reset clk_dvfs_node->min_rate/max_rate */
1050 dvfs_get_rate_range(clk_dvfs_node);
1051 clk_dvfs_node->freq_limit_en = 0;
1052 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1054 mutex_unlock(&clk_dvfs_node->vd->mutex);
1057 DVFS_DBG("%s: clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
1058 __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1061 EXPORT_SYMBOL(dvfs_clk_disable_limit);
/* Stop thermal throttling: clear the enable flag and cancel (and wait for)
 * the periodic temperature-limit work. */
1063 void dvfs_disable_temp_limit(void) {
1064 temp_limit_enable = 0;
1065 cancel_delayed_work_sync(&dvfs_temp_limit_work);
/*
 * Read back the node's current [min_rate, max_rate] clamp under the vd
 * lock.  Returns freq_limit_en (non-zero when a clamp is active).
 */
1068 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate)
1075 mutex_lock(&clk_dvfs_node->vd->mutex);
1077 *min_rate = clk_dvfs_node->min_rate;
1078 *max_rate = clk_dvfs_node->max_rate;
1079 freq_limit_en = clk_dvfs_node->freq_limit_en;
1081 mutex_unlock(&clk_dvfs_node->vd->mutex);
1083 return freq_limit_en;
1085 EXPORT_SYMBOL(dvfs_clk_get_limit);
/*
 * Register a custom set-rate hook for this node; when present it is used
 * by the dvfs target path instead of plain clk_set_rate().  Stored under
 * the vd lock.
 */
1087 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1092 mutex_lock(&clk_dvfs_node->vd->mutex);
1093 clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1094 mutex_unlock(&clk_dvfs_node->vd->mutex);
1098 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
/*
 * Return the node's freq/volt table pointer (read under the vd lock).
 * The caller must not free it; ownership stays with the node.
 */
1100 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node)
1102 struct cpufreq_frequency_table *table;
1107 mutex_lock(&clk_dvfs_node->vd->mutex);
1108 table = clk_dvfs_node->dvfs_table;
1109 mutex_unlock(&clk_dvfs_node->vd->mutex);
1113 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
/*
 * Replace the node's freq/volt table and re-derive everything that depends
 * on it: rate range, clock-rounded frequencies, regulator-rounded volts.
 * All done under the vd lock.  Rejects a NULL/ERR table.
 */
1115 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1120 if (IS_ERR_OR_NULL(table)){
1121 DVFS_ERR("%s:invalid table!\n", __func__);
1125 mutex_lock(&clk_dvfs_node->vd->mutex);
1126 clk_dvfs_node->dvfs_table = table;
1127 dvfs_get_rate_range(clk_dvfs_node);
1128 dvfs_table_round_clk_rate(clk_dvfs_node);
1129 dvfs_table_round_volt(clk_dvfs_node);
1130 mutex_unlock(&clk_dvfs_node->vd->mutex);
1134 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
/*
 * Enable DVFS management for a clock node.  On the first enable this:
 * acquires the vd's regulator (probing its voltage list and settle time),
 * rounds the freq/volt table, records the clock's current rate, looks up
 * the matching table voltage, raises the vd to the aggregated voltage if
 * needed, and registers an optional clk notifier.  Subsequent calls only
 * bump enable_count.  Finally the regulator-mode table is validated and
 * the current mode resolved when regu_mode_en is set.  NOTE(review):
 * elided listing -- several error-return and else lines are missing.
 */
1136 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1138 struct cpufreq_frequency_table clk_fv;
1147 DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n",
1148 __func__, __clk_get_name(clk_dvfs_node->clk));
1150 if (!clk_dvfs_node->vd) {
1151 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n",
1152 __func__, clk_dvfs_node->name);
1155 mutex_lock(&clk_dvfs_node->vd->mutex);
1156 if (clk_dvfs_node->enable_count == 0) {
/* Lazily acquire the vd's regulator on first enable of any of its clocks. */
1157 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1158 if (clk_dvfs_node->vd->regulator_name)
1159 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1160 if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1161 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1162 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1163 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1164 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1165 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1167 clk_dvfs_node->enable_count = 0;
1168 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n",
1169 __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1170 mutex_unlock(&clk_dvfs_node->vd->mutex);
1174 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1177 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1178 __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
/* Normalize table to hardware capabilities and seed the current rate. */
1180 dvfs_table_round_clk_rate(clk_dvfs_node);
1181 dvfs_get_rate_range(clk_dvfs_node);
1182 clk_dvfs_node->freq_limit_en = 1;
1183 dvfs_table_round_volt(clk_dvfs_node);
1184 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1185 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1187 DVFS_DBG("%s: %s get freq %u!\n",
1188 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
/* Current rate not covered by the table: either empty table (error) or
 * all entries below current rate (accept current rate, enable anyway). */
1190 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1191 if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1192 DVFS_ERR("%s: table empty\n", __func__);
1193 clk_dvfs_node->enable_count = 0;
1194 mutex_unlock(&clk_dvfs_node->vd->mutex);
1197 DVFS_WARNING("%s: clk(%s) freq table all value are smaller than default(%d), use default, just enable dvfs\n",
1198 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1199 clk_dvfs_node->enable_count++;
1200 mutex_unlock(&clk_dvfs_node->vd->mutex);
1204 clk_dvfs_node->enable_count++;
1205 clk_dvfs_node->set_volt = clk_fv.index;
1206 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1207 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1208 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1210 if (clk_dvfs_node->dvfs_nb) {
1211 // must unregister when clk disable
1212 clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
/* Raise the vd to the aggregated voltage before declaring DVFS active. */
1215 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1216 ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1217 dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1219 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1220 clk_dvfs_node->enable_count = 0;
1221 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
1222 mutex_unlock(&clk_dvfs_node->vd->mutex);
1225 clk_dvfs_node->vd->cur_volt = volt_new;
1226 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1230 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1231 __func__, clk_dvfs_node->enable_count);
1232 clk_dvfs_node->enable_count++;
/* Validate the regulator-mode table and resolve the mode for the rate. */
1235 if (clk_dvfs_node->regu_mode_en) {
1236 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1238 DVFS_ERR("%s: clk(%s) regu_mode_table is unvalid, set regu_mode_en=0!\n",
1239 __func__, clk_dvfs_node->name);
1240 clk_dvfs_node->regu_mode_en = 0;
1241 mutex_unlock(&clk_dvfs_node->vd->mutex);
1245 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1247 DVFS_ERR("%s: clk(%s) rate %dKhz get regu_mode fail\n",
1248 __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1249 mutex_unlock(&clk_dvfs_node->vd->mutex);
1252 clk_dvfs_node->regu_mode = mode;
1254 dvfs_update_clk_pds_mode(clk_dvfs_node);
1257 mutex_unlock(&clk_dvfs_node->vd->mutex);
1261 EXPORT_SYMBOL(clk_enable_dvfs);
/*
 * Disable DVFS for a clock node: decrement enable_count and, on the last
 * disable, drop the vd to the voltage the remaining enabled clocks need
 * and unregister the clk notifier registered at enable time.  Warns (and
 * returns -- return line elided) when already fully disabled.
 */
1263 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1270 DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n",
1271 __func__, __clk_get_name(clk_dvfs_node->clk));
1273 mutex_lock(&clk_dvfs_node->vd->mutex);
1274 if (!clk_dvfs_node->enable_count) {
1275 DVFS_WARNING("%s:clk(%s) is already closed!\n",
1276 __func__, __clk_get_name(clk_dvfs_node->clk));
1277 mutex_unlock(&clk_dvfs_node->vd->mutex);
1280 clk_dvfs_node->enable_count--;
1281 if (0 == clk_dvfs_node->enable_count) {
1282 DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1283 __func__, __clk_get_name(clk_dvfs_node->clk));
/* Re-aggregate without this clock; the vd may be able to drop voltage. */
1284 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1285 dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1288 clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1289 DVFS_DBG("clk unregister nb!\n");
1293 mutex_unlock(&clk_dvfs_node->vd->mutex);
1296 EXPORT_SYMBOL(clk_disable_dvfs);
/*
 * Clamp a requested rate to the node's active limits: the [min, max]
 * clamp when freq_limit_en is set, then the thermal cap when
 * temp_limit_enable is set.  Returns the clamped rate.  Elided listing:
 * the else branch assigning limit_rate = rate is not visible here.
 */
1298 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1300 unsigned long limit_rate;
1303 if (clk_dvfs_node->freq_limit_en) {
1305 if (rate < clk_dvfs_node->min_rate) {
1306 limit_rate = clk_dvfs_node->min_rate;
1307 } else if (rate > clk_dvfs_node->max_rate) {
1308 limit_rate = clk_dvfs_node->max_rate;
/* Thermal cap is applied after the static clamp, and can only lower it. */
1310 if (temp_limit_enable) {
1311 if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1312 limit_rate = clk_dvfs_node->temp_limit_rate;
1317 DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
1322 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1324 struct cpufreq_frequency_table clk_fv;
1325 unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1326 struct clk *clk = clk_dvfs_node->clk;
1332 if (!clk_dvfs_node->enable_count){
1333 DVFS_WARNING("%s:dvfs(%s) is disable\n",
1334 __func__, clk_dvfs_node->name);
1338 if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1339 /* It means the last time set voltage error */
1340 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1346 rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1347 new_rate = __clk_round_rate(clk, rate);
1348 old_rate = __clk_get_rate(clk);
1349 if (new_rate == old_rate)
1352 DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1353 __func__, clk_dvfs_node->name, rate, old_rate);
1355 /* find the clk corresponding voltage */
1356 ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1358 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
1359 __func__, clk_dvfs_node->name, new_rate);
1362 clk_volt_store = clk_dvfs_node->set_volt;
1363 clk_dvfs_node->set_volt = clk_fv.index;
1364 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1365 DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1366 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1369 /* if up the rate */
1370 if (new_rate > old_rate) {
1371 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1373 DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1374 __func__, clk_dvfs_node->name, new_rate);
1376 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1378 goto fail_roll_back;
1382 if (clk_dvfs_node->clk_dvfs_target) {
1383 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1385 ret = clk_set_rate(clk, rate);
1389 DVFS_ERR("%s:clk(%s) set rate err\n",
1390 __func__, __clk_get_name(clk));
1391 goto fail_roll_back;
1393 clk_dvfs_node->set_freq = new_rate / 1000;
1395 DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n",
1396 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1398 /* if down the rate */
1399 if (new_rate < old_rate) {
1400 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1404 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1406 DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1407 __func__, clk_dvfs_node->name, new_rate);
1412 clk_dvfs_node->set_volt = clk_volt_store;
1417 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1419 return __clk_round_rate(clk_dvfs_node->clk, rate);
1421 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1423 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1425 return __clk_get_rate(clk_dvfs_node->clk);
1427 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1429 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1431 unsigned long last_set_rate;
1433 mutex_lock(&clk_dvfs_node->vd->mutex);
1434 last_set_rate = clk_dvfs_node->last_set_rate;
1435 mutex_unlock(&clk_dvfs_node->vd->mutex);
1437 return last_set_rate;
1439 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1442 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1444 return clk_enable(clk_dvfs_node->clk);
1446 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1448 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1450 return clk_disable(clk_dvfs_node->clk);
1452 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1454 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1458 struct dvfs_node *clk_dvfs_node;
1460 mutex_lock(&rk_dvfs_mutex);
1461 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1462 mutex_lock(&vd->mutex);
1463 list_for_each_entry(pd, &vd->pd_list, node) {
1464 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1465 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
1466 mutex_unlock(&vd->mutex);
1467 mutex_unlock(&rk_dvfs_mutex);
1468 return clk_dvfs_node;
1472 mutex_unlock(&vd->mutex);
1474 mutex_unlock(&rk_dvfs_mutex);
1478 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
/*
 * Counterpart of clk_get_dvfs_node().  Nodes live for the lifetime of the
 * system, so there is currently nothing to release; kept for API symmetry.
 */
void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
{
}
EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1486 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1488 return clk_prepare_enable(clk_dvfs_node->clk);
1490 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1493 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1495 clk_disable_unprepare(clk_dvfs_node->clk);
1497 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
1499 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1506 DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n",
1507 __func__, clk_dvfs_node->name, rate);
1509 #if 0 // judge by reference func in rk
1510 if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1511 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1516 if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1517 mutex_lock(&clk_dvfs_node->vd->mutex);
1518 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1519 clk_dvfs_node->last_set_rate = rate;
1520 mutex_unlock(&clk_dvfs_node->vd->mutex);
1522 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n",
1523 __func__, clk_dvfs_node->name);
1528 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
1531 int rk_regist_vd(struct vd_node *vd)
1537 vd->volt_time_flag=0;
1539 INIT_LIST_HEAD(&vd->pd_list);
1540 mutex_lock(&rk_dvfs_mutex);
1541 list_add(&vd->node, &rk_dvfs_tree);
1542 mutex_unlock(&rk_dvfs_mutex);
1546 EXPORT_SYMBOL_GPL(rk_regist_vd);
1548 int rk_regist_pd(struct pd_node *pd)
1559 INIT_LIST_HEAD(&pd->clk_list);
1560 mutex_lock(&vd->mutex);
1561 list_add(&pd->node, &vd->pd_list);
1562 mutex_unlock(&vd->mutex);
1566 EXPORT_SYMBOL_GPL(rk_regist_pd);
1568 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1576 vd = clk_dvfs_node->vd;
1577 pd = clk_dvfs_node->pd;
1581 mutex_lock(&vd->mutex);
1582 list_add(&clk_dvfs_node->node, &pd->clk_list);
1583 mutex_unlock(&vd->mutex);
1587 EXPORT_SYMBOL_GPL(rk_regist_clk);
1589 static int rk_convert_cpufreq_table(struct dvfs_node *dvfs_node)
1593 struct cpufreq_frequency_table *table;
1596 table = dvfs_node->dvfs_table;
1597 dev = &dvfs_node->dev;
1599 for (i = 0; table[i].frequency!= CPUFREQ_TABLE_END; i++){
1600 opp = opp_find_freq_exact(dev, table[i].frequency * 1000, true);
1602 return PTR_ERR(opp);
1603 table[i].index = opp_get_voltage(opp);
1608 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1610 struct cpufreq_frequency_table *temp_limt_table = NULL;
1611 const struct property *prop;
1615 prop = of_find_property(dev_node, propname, NULL);
1621 nr = prop->length / sizeof(u32);
1623 pr_err("%s: Invalid freq list\n", __func__);
1627 temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1628 (nr/2 + 1), GFP_KERNEL);
1632 for (i=0; i<nr/2; i++){
1633 temp_limt_table[i].index = be32_to_cpup(val++);
1634 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
1637 temp_limt_table[i].index = 0;
1638 temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1640 return temp_limt_table;
1644 int of_dvfs_init(void)
1648 struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
1649 struct dvfs_node *dvfs_node;
1654 DVFS_DBG("%s\n", __func__);
1656 dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
1657 if (IS_ERR_OR_NULL(dvfs_dev_node)) {
1658 DVFS_ERR("%s get dvfs dev node err\n", __func__);
1659 return PTR_ERR(dvfs_dev_node);
1662 for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
1663 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
1667 mutex_init(&vd->mutex);
1668 vd->name = vd_dev_node->name;
1669 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
1671 DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n",
1672 __func__, vd_dev_node->name, ret);
1677 vd->suspend_volt = 0;
1679 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1680 vd->vd_dvfs_target = dvfs_target;
1681 ret = rk_regist_vd(vd);
1683 DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
1688 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n",
1689 __func__, vd->name, vd->regulator_name, vd->suspend_volt);
1691 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {
1692 pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
1697 pd->name = pd_dev_node->name;
1699 ret = rk_regist_pd(pd);
1701 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
1705 DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n",
1706 __func__, pd->name, vd->name);
1707 for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
1708 if (!of_device_is_available(clk_dev_node))
1711 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
1715 dvfs_node->name = clk_dev_node->name;
1719 val = of_get_property(clk_dev_node, "regu-mode-en", NULL);
1721 dvfs_node->regu_mode_en = be32_to_cpup(val);
1722 if (dvfs_node->regu_mode_en)
1723 dvfs_node->regu_mode_table = of_get_regu_mode_table(clk_dev_node);
1725 dvfs_node->regu_mode_table = NULL;
1727 val = of_get_property(clk_dev_node, "temp-limit-enable", NULL);
1729 temp_limit_enable = be32_to_cpup(val);
1730 if (temp_limit_enable) {
1731 val = of_get_property(clk_dev_node, "target-temp", NULL);
1733 target_temp = be32_to_cpup(val);
1734 val = of_get_property(clk_dev_node, "temp-channel", NULL);
1736 dvfs_node->temp_channel = be32_to_cpup(val);
1738 dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
1739 dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
1740 dvfs_node->virt_temp_limit_table[0] =
1741 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-1-cpu-busy");
1742 dvfs_node->virt_temp_limit_table[1] =
1743 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-2-cpu-busy");
1744 dvfs_node->virt_temp_limit_table[2] =
1745 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-3-cpu-busy");
1746 dvfs_node->virt_temp_limit_table[3] =
1747 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-4-cpu-busy");
1749 dvfs_node->temp_limit_rate = -1;
1750 dvfs_node->dev.of_node = clk_dev_node;
1751 ret = of_init_opp_table(&dvfs_node->dev);
1753 DVFS_ERR("%s:clk(%s) get opp table err:%d\n", __func__, dvfs_node->name, ret);
1758 ret = opp_init_cpufreq_table(&dvfs_node->dev, &dvfs_node->dvfs_table);
1760 DVFS_ERR("%s:clk(%s) get cpufreq table err:%d\n", __func__, dvfs_node->name, ret);
1764 ret = rk_convert_cpufreq_table(dvfs_node);
1770 clk = clk_get(NULL, clk_dev_node->name);
1772 DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
1778 dvfs_node->clk = clk;
1779 ret = rk_regist_clk(dvfs_node);
1781 DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
1785 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n",
1786 __func__, clk_dev_node->name, pd->name);
1794 /*********************************************************************************/
1796 * dump_dbg_map() : dump all DVFS tree information to the kernel log for debugging
1798 static int dump_dbg_map(char *buf)
1803 struct dvfs_node *clk_dvfs_node;
1806 mutex_lock(&rk_dvfs_mutex);
1807 printk( "-------------DVFS TREE-----------\n\n\n");
1808 printk( "DVFS TREE:\n");
1810 list_for_each_entry(vd, &rk_dvfs_tree, node) {
1811 mutex_lock(&vd->mutex);
1812 printk( "|\n|- voltage domain:%s\n", vd->name);
1813 printk( "|- current voltage:%d\n", vd->cur_volt);
1814 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
1816 list_for_each_entry(pd, &vd->pd_list, node) {
1817 printk( "| |\n| |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
1818 pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
1819 dvfs_regu_mode_to_string(pd->regu_mode));
1821 list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1822 printk( "| | |\n| | |- clock: %s current: rate %d, volt = %d,"
1823 " enable_dvfs = %s\n",
1824 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
1825 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
1826 printk( "| | |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
1827 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
1828 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
1829 clk_dvfs_node->last_set_rate/1000);
1830 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1831 printk( "| | | |- freq = %d, volt = %d\n",
1832 clk_dvfs_node->dvfs_table[i].frequency,
1833 clk_dvfs_node->dvfs_table[i].index);
1836 printk( "| | |- clock: %s current: rate %d, regu_mode = %s,"
1837 " regu_mode_en = %d\n",
1838 clk_dvfs_node->name, clk_dvfs_node->set_freq,
1839 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
1840 clk_dvfs_node->regu_mode_en);
1841 if (clk_dvfs_node->regu_mode_table) {
1842 for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1843 printk( "| | | |- freq = %d, regu_mode = %s\n",
1844 clk_dvfs_node->regu_mode_table[i].frequency/1000,
1845 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
1850 mutex_unlock(&vd->mutex);
1853 printk( "-------------DVFS TREE END------------\n");
1854 mutex_unlock(&rk_dvfs_mutex);
1859 /*********************************************************************************/
1860 static struct kobject *dvfs_kobj;
1861 struct dvfs_attribute {
1862 struct attribute attr;
1863 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
1865 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
1866 const char *buf, size_t n);
1869 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
1870 const char *buf, size_t n)
1874 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
1877 return dump_dbg_map(buf);
1881 static struct dvfs_attribute dvfs_attrs[] = {
1882 /* node_name permision show_func store_func */
1883 //#ifdef CONFIG_RK_CLOCK_PROC
1884 __ATTR(dvfs_tree, S_IRUSR | S_IRGRP | S_IWUSR, dvfs_tree_show, dvfs_tree_store),
1888 static int __init dvfs_init(void)
1892 dvfs_kobj = kobject_create_and_add("dvfs", NULL);
1895 for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
1896 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
1898 DVFS_ERR("create index %d error\n", i);
1903 if (temp_limit_enable) {
1904 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
1905 if (!clk_cpu_dvfs_node){
1909 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1910 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
1911 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
1914 vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
1915 if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
1916 struct clk *clk = clk_get(NULL, "pd_gpu");
1919 rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
1921 fb_register_client(&early_suspend_notifier);
1922 register_reboot_notifier(&vdd_gpu_reboot_notifier);
1928 late_initcall(dvfs_init);