f189f87cab1a008f85cf692ed888de25870f7ba2
[firefly-linux-kernel-4.4.55.git] / arch / arm / mach-rockchip / dvfs.c
1 /* arch/arm/mach-rockchip/dvfs.c
2  *
3  * Copyright (C) 2012 ROCKCHIP, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
20 #include <linux/of.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
24 #include <linux/fb.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
30 #include "efuse.h"
31
32 #define MHz     (1000 * 1000)
33 static LIST_HEAD(rk_dvfs_tree);
34 static DEFINE_MUTEX(rk_dvfs_mutex);
35 static struct workqueue_struct *dvfs_wq;
36 static struct dvfs_node *clk_cpu_b_dvfs_node;
37 static struct dvfs_node *clk_cpu_l_dvfs_node;
38 static struct dvfs_node *clk_cpu_bl_dvfs_node;
39 static struct dvfs_node *clk_cpu_dvfs_node;
40 static struct dvfs_node *clk_gpu_dvfs_node;
41 static int pd_gpu_off, early_suspend;
42 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
43 struct regulator *vdd_gpu_regulator;
44 static DEFINE_MUTEX(temp_limit_mutex);
45
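/*
 * With CONFIG_ROCKCHIP_THERMAL the TSADC conversion needs the current
 * supply voltage, so the cluster regulator is read back (under the vd
 * mutex) before asking the sensor for a temperature.
 */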
46 static int dvfs_get_temp(int chn)
47 {
48         int temp = INVALID_TEMP;
49
50 #if IS_ENABLED(CONFIG_ROCKCHIP_THERMAL)
51         int read_back = 0;
52
53         if (clk_cpu_bl_dvfs_node == NULL ||
54             IS_ERR_OR_NULL(clk_cpu_bl_dvfs_node->vd->regulator))
55                 return temp;
56
57         mutex_lock(&clk_cpu_bl_dvfs_node->vd->mutex);
58         read_back = dvfs_regulator_get_voltage(
59                 clk_cpu_bl_dvfs_node->vd->regulator);
60         temp = rockchip_tsadc_get_temp(chn, read_back);
61         mutex_unlock(&clk_cpu_bl_dvfs_node->vd->mutex);
62 #else
63         temp = rockchip_tsadc_get_temp(chn);
64 #endif
65
66         return temp;
67 }
68
69 static int pvtm_get_temp(struct dvfs_node *dvfs_node, int chn)
70 {
71         int temp = INVALID_TEMP;
72
73 #if IS_ENABLED(CONFIG_ROCKCHIP_THERMAL)
74         int read_back = 0;
75
76         if (dvfs_node == NULL ||
77             IS_ERR_OR_NULL(dvfs_node->vd->regulator))
78                 return temp;
79         read_back = dvfs_regulator_get_voltage(
80                 dvfs_node->vd->regulator);
81         temp = rockchip_tsadc_get_temp(chn, read_back);
82 #else
83         temp = rockchip_tsadc_get_temp(chn);
84 #endif
85
86         return temp;
87 }
88
89
90 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
91         unsigned long event, void *ptr)
92 {
93         int ret;
94
95         DVFS_DBG("%s: enable vdd_gpu\n", __func__);
96         mutex_lock(&switch_vdd_gpu_mutex);
97         if (!regulator_is_enabled(vdd_gpu_regulator))
98                 ret = regulator_enable(vdd_gpu_regulator);
99         mutex_unlock(&switch_vdd_gpu_mutex);
100
101         return NOTIFY_OK;
102 }
103
104 static struct notifier_block vdd_gpu_reboot_notifier = {
105         .notifier_call = vdd_gpu_reboot_notifier_event,
106 };
107
108 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
109         unsigned long event, void *ptr)
110 {
111         int ret;
112
113         switch (event) {
114         case RK_CLK_PD_PREPARE:
115                 mutex_lock(&switch_vdd_gpu_mutex);
116                 pd_gpu_off = 0;
117                 if (early_suspend) {
118                         if (!regulator_is_enabled(vdd_gpu_regulator))
119                                 ret = regulator_enable(vdd_gpu_regulator);
120                 }
121                 mutex_unlock(&switch_vdd_gpu_mutex);
122                 break;
123         case RK_CLK_PD_UNPREPARE:
124                 mutex_lock(&switch_vdd_gpu_mutex);
125                 pd_gpu_off = 1;
126                 if (early_suspend) {
127                         if (regulator_is_enabled(vdd_gpu_regulator))
128                                 ret = regulator_disable(vdd_gpu_regulator);
129                 }
130                 mutex_unlock(&switch_vdd_gpu_mutex);
131                 break;
132         default:
133                 break;
134         }
135
136         return NOTIFY_OK;
137 }
138
139 static struct notifier_block clk_pd_gpu_notifier = {
140         .notifier_call = clk_pd_gpu_notifier_call,
141 };
142
143
144 static int early_suspend_notifier_call(struct notifier_block *self,
145                                 unsigned long action, void *data)
146 {
147         struct fb_event *event = data;
148         int blank_mode = *((int *)event->data);
149         int ret;
150
151         mutex_lock(&switch_vdd_gpu_mutex);
152         if (action == FB_EARLY_EVENT_BLANK) {
153                 switch (blank_mode) {
154                 case FB_BLANK_UNBLANK:
155                         early_suspend = 0;
156                         if (pd_gpu_off) {
157                                 if (!regulator_is_enabled(vdd_gpu_regulator))
158                                         ret = regulator_enable(
159                                         vdd_gpu_regulator);
160                         }
161                         break;
162                 default:
163                         break;
164                 }
165         } else if (action == FB_EVENT_BLANK) {
166                 switch (blank_mode) {
167                 case FB_BLANK_POWERDOWN:
168                         early_suspend = 1;
169                         if (pd_gpu_off) {
170                                 if (regulator_is_enabled(vdd_gpu_regulator))
171                                         ret = regulator_disable(
172                                         vdd_gpu_regulator);
173                         }
174
175                         break;
176                 default:
177                         break;
178                 }
179         }
180         mutex_unlock(&switch_vdd_gpu_mutex);
181
182         return NOTIFY_OK;
183 }
184
185 static struct notifier_block early_suspend_notifier = {
186                 .notifier_call = early_suspend_notifier_call,
187 };
188
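/*
 * Regulator operating modes as encoded in the DT "regu-mode-table"
 * property; they are mapped to the kernel's REGULATOR_MODE_* values by
 * dvfs_regu_mode_convert()/dvfs_regu_mode_deconvert() below.
 */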
189 #define DVFS_REGULATOR_MODE_STANDBY     1
190 #define DVFS_REGULATOR_MODE_IDLE        2
191 #define DVFS_REGULATOR_MODE_NORMAL      3
192 #define DVFS_REGULATOR_MODE_FAST        4
193
194 static const char *dvfs_regu_mode_to_string(unsigned int mode)
195 {
196         switch (mode) {
197         case DVFS_REGULATOR_MODE_FAST:
198                 return "FAST";
199         case DVFS_REGULATOR_MODE_NORMAL:
200                 return "NORMAL";
201         case DVFS_REGULATOR_MODE_IDLE:
202                 return "IDLE";
203         case DVFS_REGULATOR_MODE_STANDBY:
204                 return "STANDBY";
205         default:
206                 return "UNKNOWN";
207         }
208 }
209
210 static int dvfs_regu_mode_convert(unsigned int mode)
211 {
212         switch (mode) {
213         case DVFS_REGULATOR_MODE_FAST:
214                 return REGULATOR_MODE_FAST;
215         case DVFS_REGULATOR_MODE_NORMAL:
216                 return REGULATOR_MODE_NORMAL;
217         case DVFS_REGULATOR_MODE_IDLE:
218                 return REGULATOR_MODE_IDLE;
219         case DVFS_REGULATOR_MODE_STANDBY:
220                 return REGULATOR_MODE_STANDBY;
221         default:
222                 return -EINVAL;
223         }
224 }
225
226 static int dvfs_regu_mode_deconvert(unsigned int mode)
227 {
228         switch (mode) {
229         case REGULATOR_MODE_FAST:
230                 return DVFS_REGULATOR_MODE_FAST;
231         case REGULATOR_MODE_NORMAL:
232                 return DVFS_REGULATOR_MODE_NORMAL;
233         case REGULATOR_MODE_IDLE:
234                 return DVFS_REGULATOR_MODE_IDLE;
235         case REGULATOR_MODE_STANDBY:
236                 return DVFS_REGULATOR_MODE_STANDBY;
237         default:
238                 return -EINVAL;
239         }
240 }
241
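/*
 * "regu-mode-table" is a list of <frequency mode> pairs (frequency in MHz,
 * converted to kHz here); the last pair must have frequency 0, and an
 * extra CPUFREQ_TABLE_END entry is appended as the sentinel.
 */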
242 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
243 {
244         struct cpufreq_frequency_table *regu_mode_table = NULL;
245         const struct property *prop;
246         const __be32 *val;
247         int nr, i;
248
249         prop = of_find_property(dev_node, "regu-mode-table", NULL);
250         if (!prop)
251                 return NULL;
252         if (!prop->value)
253                 return NULL;
254
255         nr = prop->length / sizeof(u32);
256         if (nr % 2) {
257                 pr_err("%s: Invalid regu-mode-table\n", __func__);
258                 return NULL;
259         }
260
261         regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
262                              (nr/2+1), GFP_KERNEL);
263         if (!regu_mode_table) {
264                 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
265                 return ERR_PTR(-ENOMEM);
266         }
267
268         val = prop->value;
269
270         for (i=0; i<nr/2; i++){
271                 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
272                 regu_mode_table[i].index = be32_to_cpup(val++);
273         }
274
275         if (regu_mode_table[i-1].frequency != 0) {
276                 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
277                 kfree(regu_mode_table);
278                 return NULL;
279         }
280
281         regu_mode_table[i].index = 0;
282         regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
283
284         return regu_mode_table;
285 }
286
287 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
288 {
289         int i, ret;
290         int mode, convert_mode, valid_mode;
291
292         if (!clk_dvfs_node)
293                 return -EINVAL;
294
295         if (!clk_dvfs_node->regu_mode_table)
296                 return -EINVAL;
297
298         if (!clk_dvfs_node->vd)
299                 return -EINVAL;
300
301         if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
302                 return -EINVAL;
303
304         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
305                 mode = clk_dvfs_node->regu_mode_table[i].index;
306                 convert_mode = dvfs_regu_mode_convert(mode);
307
308                 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
309                                                 &convert_mode);
310                 if (ret) {
311                         DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
312                                 mode);
313                         kfree(clk_dvfs_node->regu_mode_table);
314                         clk_dvfs_node->regu_mode_table = NULL;
315                         return ret;
316                 }
317
318                 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
319                 if (valid_mode != mode) {
320                         DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
321                                 __func__, mode, valid_mode);
322                         clk_dvfs_node->regu_mode_table[i].index = valid_mode;
323                 }
324
325         }
326
327         return 0;
328 }
329
330 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
331         unsigned long rate, unsigned int *mode)
332 {
333         int i;
334
335
336         if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
337                 return -EINVAL;
338
339         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
340                 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
341                         *mode = clk_dvfs_node->regu_mode_table[i].index;
342                         return 0;
343                 }
344         }
345
346         return -EINVAL;
347 }
348
349 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
350 {
351         unsigned int mode_max = 0;
352
353
354         if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
355                 return clk_dvfs_node->regu_mode;
356         }
357
358         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
359                 if (clk_dvfs_node->regu_mode_en)
360                         mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
361         }
362
363         return mode_max;
364 }
365
366 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
367 {
368         struct pd_node *pd;
369
370         if (!clk_dvfs_node)
371                 return;
372
373         pd = clk_dvfs_node->pd;
374         if (!pd)
375                 return;
376
377         pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
378 }
379
380 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
381 {
382         unsigned int mode_max_vd = 0;
383         struct pd_node *pd;
384
385         if (!vd)
386                 return -EINVAL;
387
388         list_for_each_entry(pd, &vd->pd_list, node) {
389                 mode_max_vd = max(mode_max_vd, pd->regu_mode);
390         }
391
392         return mode_max_vd;
393 }
394
395 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
396 {
397         if (!clk_dvfs_node)
398                 return -EINVAL;
399
400         dvfs_update_clk_pds_mode(clk_dvfs_node);
401
402         return  dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
403 }
404
405 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
406 {
407         int convert_mode;
408         int ret = 0;
409
410
411         if (IS_ERR_OR_NULL(vd)) {
412                 DVFS_ERR("%s: vd_node error\n", __func__);
413                 return -EINVAL;
414         }
415
416         DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
417
418         convert_mode = dvfs_regu_mode_convert(mode);
419         if (convert_mode < 0) {
420                 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
421                 return convert_mode;
422         }
423
424         if (!IS_ERR_OR_NULL(vd->regulator)) {
425                 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
426                 if (ret < 0) {
427                         DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
428                                 vd->regulator_name, mode, vd->regu_mode);
429                         return -EAGAIN;
430                 }
431         } else {
432                 DVFS_ERR("%s: invalid regulator\n", __func__);
433                 return -EINVAL;
434         }
435
436         vd->regu_mode = mode;
437
438         return 0;
439 }
440
441 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
442 {
443         int ret;
444         int mode;
445
446
447         if (!clk_dvfs_node)
448                 return -EINVAL;
449
450         if (!clk_dvfs_node->regu_mode_en)
451                 return 0;
452
453         ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
454         if (ret) {
455                 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
456                         __func__, clk_dvfs_node->name, rate);
457                 return ret;
458         }
459         clk_dvfs_node->regu_mode = mode;
460
461         mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
462         if (mode < 0)
463                 return mode;
464
465         ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
466
467         return ret;
468 }
469
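/*
 * After raising the voltage, wait for the regulator output to ramp up.
 * regulator_set_voltage_time() is used when the regulator reports a ramp
 * time; otherwise a default of delta_uV >> 9 microseconds (roughly
 * 0.5 mV/us) is assumed.
 */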
470 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
471 {
472         int u_time;
473         
474         if(new_volt <= old_volt)
475                 return;
476         if(vd->volt_time_flag > 0)      
477                 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
478         else
479                 u_time = -1;            
480         if (u_time < 0) { /* regulator cannot report ramp time, use a default */
481                 DVFS_DBG("%s: vd %s does not support getting delay time, using default\n",
482                                 __func__, vd->name);
483                 u_time = ((new_volt) - (old_volt)) >> 9;
484         }
485         
486         DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n", 
487                 __func__, vd->name, old_volt, new_volt, u_time);
488         
489         if (u_time >= 1000) {
490                 mdelay(u_time / 1000);
491                 udelay(u_time % 1000);
492                 DVFS_WARNING("%s: regulator set volt delay is larger than 1ms, old is %d, new is %d\n",
493                         __func__, old_volt, new_volt);
494         } else if (u_time) {
495                 udelay(u_time);
496         }                       
497 }
498
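/*
 * On a set-voltage error, wait briefly and read the voltage back; if it
 * already matches the target, treat the request as successful.
 */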
499 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
500 {
501         int ret = 0, read_back = 0;
502         
503         ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
504         if (ret < 0) {
505                 DVFS_ERR("%s: now read back to check voltage\n", __func__);
506
507                 /* read back to judge if it is already effect */
508                 mdelay(2);
509                 read_back = dvfs_regulator_get_voltage(regulator);
510                 if (read_back == max_uV) {
511                         DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
512                         ret = 0;
513                 } else {
514                         DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
515                 }
516         }
517         
518         return ret;
519 }
520
521 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
522 {
523         int ret = 0;
524         
525         if (IS_ERR_OR_NULL(vd_clk)) {
526                 DVFS_ERR("%s: vd_node error\n", __func__);
527                 return -EINVAL;
528         }
529
530         DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
531
532         if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
533                 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
534                 dvfs_volt_up_delay(vd_clk, volt_new, vd_clk->cur_volt);
535                 if (ret < 0) {
536                         vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
537                         DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
538                                         __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
539                         return -EAGAIN;
540                 }
541
542         } else {
543                 DVFS_ERR("%s: invalid regulator\n", __func__);
544                 return -EINVAL;
545         }
546
547         vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
548         vd_clk->cur_volt = volt_new;
549
550         return 0;
551
552 }
553
554 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
555 {
556         int flag_set_volt_correct = 0;
557         if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
558                 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
559         else {
560                 DVFS_ERR("%s: invalid regulator\n", __func__);
561                 return -EINVAL;
562         }
563         if (flag_set_volt_correct <= 0) {
564                 DVFS_ERR("%s: vd(%s) tried to reload volt but read back error (%d), stop scaling\n",
565                                 __func__, dvfs_vd->name, flag_set_volt_correct);
566                 return -EAGAIN;
567         }
568         dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
569         DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
570                         __func__, dvfs_vd->name, flag_set_volt_correct);
571
572         /* Reset vd's voltage */
573         dvfs_vd->cur_volt = flag_set_volt_correct;
574
575         return dvfs_vd->cur_volt;
576 }
577
578
579 /* for the clk enable case: read back the vd regulator's current voltage */
580 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
581 {
582         vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
583         if (vd->cur_volt <= 0)
584                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
585         else
586                 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
587 }
588
589 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
590 {
591         unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
592         int n = 0, sel_volt = 0;
593         
594         if(selector > VD_VOL_LIST_CNT)
595                 selector = VD_VOL_LIST_CNT;
596
597         for (i = 0; i < selector; i++) {
598                 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
599                 if(sel_volt <= 0){      
600                         //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
601                         //      __func__, vd->name, i, sel_volt);
602                         continue;
603                 }
604                 vd->volt_list[n++] = sel_volt;  
605                 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n", 
606                         __func__, vd->name, i, n, sel_volt);
607         }
608         
609         vd->n_voltages = n;
610 }
611
612 // >= volt
613 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
614 {
615         int sel_volt;
616         int i;
617         
618         for (i = 0; i < vd->n_voltages; i++) {
619                 sel_volt = vd->volt_list[i];
620                 if(sel_volt <= 0){      
621                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
622                                 __func__, i);
623                         continue;
624                 }
625                 if(sel_volt >= volt)
626                         return sel_volt;        
627         }
628         return -EINVAL;
629 }
630
631 // <= volt
632 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
633 {
634         int sel_volt;
635         int i;
636         
637         for (i = 0; i < vd->n_voltages; i++) {
638                 sel_volt = vd->volt_list[i];
639                 if(sel_volt <= 0){      
640                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
641                                 __func__, i);
642                         continue;
643                 }
644                 if(sel_volt > volt){
645                         if(i > 0)
646                                 return vd->volt_list[i-1];
647                         else
648                                 return -EINVAL;
649                 }       
650         }
651         
652         return -EINVAL;
653 }
654
655 // round volt to a supported value: >= volt or <= volt depending on flags
656 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
657 {
658         if(!vd->n_voltages)
659                 return -EINVAL;
660         if(flags == VD_LIST_RELATION_L)
661                 return vd_regulator_round_volt_min(vd, volt);
662         else
663                 return vd_regulator_round_volt_max(vd, volt);   
664 }
665
666 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
667 {
668         int i, test_volt;
669
670         if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd || 
671                 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
672                 return;
673
674         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
675
676                 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
677                 if(test_volt <= 0)
678                 {       
679                         DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
680                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
681                         break;
682                 }
683                 DVFS_DBG("clk %s:round_volt %d to %d\n",
684                         clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
685                 
686                 clk_dvfs_node->dvfs_table[i].index=test_volt;           
687         }
688 }
689
690 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
691 {
692         if (vd->volt_time_flag <= 0) { /* check whether the regulator can report voltage ramp time */
693                 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
694                 if (vd->volt_time_flag < 0) {
695                         DVFS_DBG("%s: vd %s does not support volt_time\n",
696                                 __func__, vd->name);
697                 }
698                 else {
699                         DVFS_DBG("%s: vd %s supports volt_time, a 200mV step needs %d us delay\n",
700                                 __func__, vd->name, vd->volt_time_flag);
701                 }
702         }
703 }
704 #if 0
705 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
706 {
707         //REGULATOR_MODE_FAST
708         if(vd->mode_flag <= 0){ /* check which regulator mode is supported */
709                 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
710                 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
711                         || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
712                         
713                         if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
714                                 vd->mode_flag = 0;// check again
715                         }
716                 }
717                 if(vd->mode_flag > 0){
718                         DVFS_DBG("%s,vd %s mode(now is %d) support\n",
719                                 __func__, vd->name, vd->mode_flag);
720                 }
721                 else{
722                         DVFS_DBG("%s,vd %s mode is not support now check\n",
723                                 __func__, vd->name);
724                 }
725         }
726 }
727 #endif
728
729 struct regulator *dvfs_get_regulator(char *regulator_name) 
730 {
731         struct vd_node *vd;
732
733         mutex_lock(&rk_dvfs_mutex);
734         list_for_each_entry(vd, &rk_dvfs_tree, node) {
735                 if (strcmp(regulator_name, vd->regulator_name) == 0) {
736                         mutex_unlock(&rk_dvfs_mutex);
737                         return vd->regulator;
738                 }
739         }
740         mutex_unlock(&rk_dvfs_mutex);
741         return NULL;
742 }
743
744 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
745 {
746         struct cpufreq_frequency_table *table;
747         int i = 0;
748
749         if (!clk_dvfs_node)
750                 return -EINVAL;
751
752         clk_dvfs_node->min_rate = 0;
753         clk_dvfs_node->max_rate = 0;
754
755         table = clk_dvfs_node->dvfs_table;
756         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
757                 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
758                 if (i == 0)
759                         clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
760         }
761
762         DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
763                         __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
764
765         return 0;
766 }
767
768 static void dvfs_table_round_clk_rate(struct dvfs_node  *clk_dvfs_node)
769 {
770         int i, rate, temp_rate, flags;
771         
772         if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
773                 return;
774
775         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
776                 //ddr rate = real rate+flags
777                 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
778                 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
779                 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
780                 if(temp_rate <= 0){     
781                         DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
782                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
783                         continue;
784                 }
785                 
786                 /* round the rate up to a whole MHz */
787                 if (temp_rate % MHz != 0)
788                         temp_rate = (temp_rate / MHz + 1) * MHz;
789
790                 temp_rate = (temp_rate / 1000) + flags;
791                 
792                 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
793                         clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
794                 
795                 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;             
796         }
797 }
798
799 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
800                 struct cpufreq_frequency_table *clk_fv)
801 {
802         int i = 0;
803         
804         if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
805                 /* invalid rate or missing dvfs table */
806                 return -EINVAL;
807         }
808         clk_fv->frequency = rate_khz;
809         clk_fv->index = 0;
810
811         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
812                 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
813                         clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
814                         clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
815                          //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
816                          //clk_fv->frequency, clk_fv->index);
817                         return 0;
818                 }
819         }
820         clk_fv->frequency = 0;
821         clk_fv->index = 0;
822         //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
823         return -EINVAL;
824 }
825
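/*
 * Voltage aggregation: a power domain runs at the highest set_volt of its
 * enabled clocks, and a voltage domain runs at the highest voltage of its
 * power domains (see dvfs_vd_get_newvolt_bypd() below).
 */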
826 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
827 {
828         int volt_max = 0;
829
830         if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
831                 return clk_dvfs_node->set_volt;
832         }
833
834         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
835                 if (clk_dvfs_node->enable_count)
836                         volt_max = max(volt_max, clk_dvfs_node->set_volt);
837         }
838         return volt_max;
839 }
840
841 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
842 {
843         struct pd_node *pd;
844         
845         if (!clk_dvfs_node)
846                 return;
847         
848         pd = clk_dvfs_node->pd;
849         if (!pd)
850                 return;
851         
852         pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
853 }
854
855 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
856 {
857         int volt_max_vd = 0;
858         struct pd_node *pd;
859
860         if (!vd)
861                 return -EINVAL;
862         
863         list_for_each_entry(pd, &vd->pd_list, node) {
864                 volt_max_vd = max(volt_max_vd, pd->cur_volt);
865         }
866
867         return volt_max_vd;
868 }
869
870 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
871 {
872         if (!clk_dvfs_node)
873                 return -EINVAL;
874
875         dvfs_update_clk_pds_volt(clk_dvfs_node);
876         return  dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
877 }
878
879 #if 0
880 static void dvfs_temp_limit_work_func(struct work_struct *work)
881 {
882         unsigned long delay = HZ / 10; // 100ms
883         struct vd_node *vd;
884         struct pd_node *pd;
885         struct dvfs_node *clk_dvfs_node;
886
887         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
888
889         mutex_lock(&rk_dvfs_mutex);
890         list_for_each_entry(vd, &rk_dvfs_tree, node) {
891                 mutex_lock(&vd->mutex);
892                 list_for_each_entry(pd, &vd->pd_list, node) {
893                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
894                                 if (clk_dvfs_node->temp_limit_table) {
895                                         clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
896                                         clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
897                                 }
898                         }
899                 }
900                 mutex_unlock(&vd->mutex);
901         }
902         mutex_unlock(&rk_dvfs_mutex);
903 }
904 #endif
905
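/*
 * PVTM (process/voltage/temperature monitor) calibration data: each entry
 * gives the minimum PVTM value required for a frequency. At boot,
 * pvtm_set_dvfs_table() measures the chip's PVTM value at every supported
 * voltage and picks, per OPP, the lowest voltage whose measured value meets
 * the table's target (plus temperature and voltage-margin compensation).
 */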
906 static struct cpufreq_frequency_table rk3288v0_arm_pvtm_table[] = {
907         {.frequency = 216000,  .index = 4006},
908         {.frequency = 408000,  .index = 6518},
909         {.frequency = 600000,  .index = 8345},
910         {.frequency = 816000,  .index = 11026},
911         {.frequency = 1008000,  .index = 12906},
912         {.frequency = 1200000,  .index = 15532},
913         {.frequency = 1416000,  .index = 18076},
914         {.frequency = 1608000,  .index = 21282},
915         {.frequency = CPUFREQ_TABLE_END, .index = 1},
916 };
917
918 static struct pvtm_info rk3288v0_arm_pvtm_info = {
919         .compatible = "rockchip,rk3288",
920         .pvtm_table = rk3288v0_arm_pvtm_table,
921         .channel = ARM_DVFS_CH,
922         .process_version = RK3288_PROCESS_V0,
923         .scan_rate_hz = 216000000,
924         .sample_time_us = 1000,
925         .volt_step_uv = 12500,
926         .delta_pvtm_by_volt = 400,
927         .delta_pvtm_by_temp = 14,
928         .volt_margin_uv = 25000,
929         .min_volt_uv = 850000,
930         .max_volt_uv = 1400000,
931         .cluster = 0,
932 };
933
934 static struct cpufreq_frequency_table rk3288v1_arm_pvtm_table[] = {
935         {.frequency = 216000,  .index = 4710},
936         {.frequency = 408000,  .index = 7200},
937         {.frequency = 600000,  .index = 9192},
938         {.frequency = 816000,  .index = 12560},
939         {.frequency = 1008000,  .index = 14741},
940         {.frequency = 1200000,  .index = 16886},
941         {.frequency = 1416000,  .index = 20081},
942         {.frequency = 1608000,  .index = 24061},
943         {.frequency = CPUFREQ_TABLE_END, .index = 1},
944 };
945
946 static struct pvtm_info rk3288v1_arm_pvtm_info = {
947         .compatible = "rockchip,rk3288",
948         .pvtm_table = rk3288v1_arm_pvtm_table,
949         .channel = ARM_DVFS_CH,
950         .process_version = RK3288_PROCESS_V1,
951         .scan_rate_hz = 216000000,
952         .sample_time_us = 1000,
953         .volt_step_uv = 12500,
954         .delta_pvtm_by_volt = 450,
955         .delta_pvtm_by_temp = 7,
956         .volt_margin_uv = 25000,
957         .min_volt_uv = 850000,
958         .max_volt_uv = 1400000,
959         .cluster = 0,
960 };
961
962 static struct cpufreq_frequency_table rk3288v2_arm_pvtm_table[] = {
963         {.frequency = 216000,  .index = 5369},
964         {.frequency = 408000,  .index = 6984},
965         {.frequency = 600000,  .index = 8771},
966         {.frequency = 816000,  .index = 11434},
967         {.frequency = 1008000,  .index = 14178},
968         {.frequency = 1200000,  .index = 16797},
969         {.frequency = 1416000,  .index = 20178},
970         {.frequency = 1608000,  .index = 23303},
971         {.frequency = CPUFREQ_TABLE_END, .index = 1},
972 };
973
974 static struct pvtm_info rk3288v2_arm_pvtm_info = {
975         .compatible = "rockchip,rk3288",
976         .pvtm_table = rk3288v2_arm_pvtm_table,
977         .channel = ARM_DVFS_CH,
978         .process_version = RK3288_PROCESS_V2,
979         .scan_rate_hz = 216000000,
980         .sample_time_us = 1000,
981         .volt_step_uv = 12500,
982         .delta_pvtm_by_volt = 430,
983         .delta_pvtm_by_temp = 12,
984         .volt_margin_uv = 25000,
985         .min_volt_uv = 900000,
986         .max_volt_uv = 1400000,
987         .cluster = 0,
988 };
989
990 static struct cpufreq_frequency_table rk3368v0_arm_b_pvtm_table[] = {
991         {.frequency = 216000,  .index = 9891},
992         {.frequency = 312000,  .index = 9891},
993         {.frequency = 408000,  .index = 9891},
994         {.frequency = 600000,  .index = 9891},
995         {.frequency = 696000,  .index = 10115},
996         {.frequency = 816000,  .index = 11014},
997         {.frequency = 1008000,  .index = 13650},
998         {.frequency = 1200000,  .index = 16520},
999         {.frequency = 1296000,  .index = 17856},
1000         {.frequency = 1416000,  .index = 19662},
1001         {.frequency = 1512000,  .index = 21069},
1002         {.frequency = CPUFREQ_TABLE_END, .index = 1},
1003 };
1004
1005 static struct pvtm_info rk3368v0_arm_b_pvtm_info = {
1006         .compatible = "rockchip,rk3368",
1007         .pvtm_table = rk3368v0_arm_b_pvtm_table,
1008         .channel = ARM_DVFS_CH,
1009         .process_version = 0,
1010         .scan_rate_hz = 216000000,
1011         .sample_time_us = 1000,
1012         .volt_step_uv = 12500,
1013         .delta_pvtm_by_volt = 350,
1014         .delta_pvtm_by_temp = 12,
1015         .volt_margin_uv = 50000,
1016         .min_volt_uv = 925000,
1017         .max_volt_uv = 1375000,
1018         .cluster = 0,
1019 };
1020
1021 static struct cpufreq_frequency_table rk3368v0_arm_l_pvtm_table[] = {
1022         {.frequency = 216000,  .index = 9913},
1023         {.frequency = 312000,  .index = 9913},
1024         {.frequency = 408000,  .index = 9913},
1025         {.frequency = 600000,  .index = 9913},
1026         {.frequency = 696000,  .index = 11056},
1027         {.frequency = 816000,  .index = 12816},
1028         {.frequency = 1008000,  .index = 15613},
1029         {.frequency = 1200000,  .index = 18329},
1030         {.frequency = CPUFREQ_TABLE_END, .index = 1},
1031 };
1032
1033 static struct pvtm_info rk3368v0_arm_l_pvtm_info = {
1034         .compatible = "rockchip,rk3368",
1035         .pvtm_table = rk3368v0_arm_l_pvtm_table,
1036         .channel = ARM_DVFS_CH,
1037         .process_version = 0,
1038         .scan_rate_hz = 216000000,
1039         .sample_time_us = 1000,
1040         .volt_step_uv = 12500,
1041         .delta_pvtm_by_volt = 350,
1042         .delta_pvtm_by_temp = 12,
1043         .volt_margin_uv = 50000,
1044         .min_volt_uv = 925000,
1045         .max_volt_uv = 1375000,
1046         .cluster = 1,
1047 };
1048
1049
1050 static struct pvtm_info *pvtm_info_table[] = {
1051         &rk3288v0_arm_pvtm_info,
1052         &rk3288v1_arm_pvtm_info,
1053         &rk3288v2_arm_pvtm_info,
1054         &rk3368v0_arm_b_pvtm_info,
1055         &rk3368v0_arm_l_pvtm_info
1056 };
1057
1058 static int pvtm_set_single_dvfs(struct dvfs_node *dvfs_node, u32 idx,
1059                                 struct pvtm_info *info, int *pvtm_list,
1060                                 u32 min_pvtm)
1061 {
1062         struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
1063         struct cpufreq_frequency_table *pvtm_table = dvfs_node->pvtm_table;
1064         int target_pvtm, pvtm_margin, volt_margin;
1065         unsigned int n_voltages = dvfs_node->vd->n_voltages;
1066         int *volt_list = dvfs_node->vd->volt_list;
1067         int n, temp;
1068
1069         volt_margin = info->volt_margin_uv + pvtm_table[idx].index;
1070         n = volt_margin/info->volt_step_uv;
1071         if (volt_margin%info->volt_step_uv)
1072                 n++;
1073
1074         pvtm_margin = n*info->delta_pvtm_by_volt;
1075         if (cpu_is_rk3288())
1076                 temp = pvtm_get_temp(dvfs_node, 1);
1077         else
1078                 temp = pvtm_get_temp(dvfs_node, 0);
1079
1080         if (temp < dvfs_node->pvtm_min_temp || temp == INVALID_TEMP)
1081                 temp = dvfs_node->pvtm_min_temp;
1082
1083         target_pvtm = min_pvtm+temp * info->delta_pvtm_by_temp + pvtm_margin;
1084
1085         DVFS_DBG("=====%s: temp:%d, freq:%d, target pvtm:%d=====\n",
1086                  __func__, temp, dvfs_table[idx].frequency, target_pvtm);
1087
1088         for (n = 0; n < n_voltages; n++) {
1089                 if (pvtm_list[n] >= target_pvtm) {
1090                         dvfs_table[idx].index = volt_list[n];
1091                         DVFS_DBG("freq[%d]=%d, volt=%d\n",
1092                                  idx, dvfs_table[idx].frequency, volt_list[n]);
1093
1094                         return 0;
1095                 }
1096         }
1097
1098         return -EINVAL;
1101 }
1102
1103 static void pvtm_set_dvfs_table(struct dvfs_node *dvfs_node)
1104 {
1105         struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
1106         struct pvtm_info *info = dvfs_node->pvtm_info;
1107         struct regulator *regulator = dvfs_node->vd->regulator;
1108         int i, j;
1109         int ret = 0;
1110         int pvtm_list[VD_VOL_LIST_CNT] = {0};
1111         unsigned int n_voltages = dvfs_node->vd->n_voltages;
1112         int *volt_list = dvfs_node->vd->volt_list;
1113
1114         if (!info)
1115                 return;
1116
1117         clk_set_rate(dvfs_node->clk, info->scan_rate_hz);
1118         DVFS_DBG("%s:%lu\n", __func__, clk_get_rate(dvfs_node->clk));
1119
1120         for (i = 0; i < n_voltages; i++) {
1121                 if ((volt_list[i] >= info->min_volt_uv) &&
1122                     (volt_list[i] <= info->max_volt_uv)) {
1123                         regulator_set_voltage(regulator, volt_list[i],
1124                                               volt_list[i]);
1125                         pvtm_list[i] = pvtm_get_value(info->channel,
1126                                                       info->sample_time_us);
1127                 }
1128         }
1129
1130         for (i = 0; dvfs_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1131                 for (j = 0; info->pvtm_table[j].frequency !=
1132                      CPUFREQ_TABLE_END; j++)
1133                         if (info->pvtm_table[j].frequency >=
1134                             dvfs_table[i].frequency) {
1135                                 int min_pvtm = info->pvtm_table[j].index;
1136
1137                                 ret = pvtm_set_single_dvfs(dvfs_node,
1138                                                            i,
1139                                                            info,
1140                                                            pvtm_list,
1141                                                            min_pvtm);
1142                                 break;
1143                         }
1144
1145                 if (ret) {
1146                         dvfs_node->max_limit_freq =
1147                                 dvfs_table[i-1].frequency * 1000;
1148                         DVFS_WARNING("freq: %d can not reach target pvtm\n",
1149                                      dvfs_table[i].frequency);
1150                         DVFS_WARNING("max freq: %d\n",
1151                                      dvfs_node->max_limit_freq);
1152                         break;
1153                 }
1154
1155                 if (info->pvtm_table[j].frequency == CPUFREQ_TABLE_END) {
1156                         DVFS_WARNING("not support freq :%d, max freq is %d\n",
1157                                      dvfs_table[i].frequency,
1158                                      info->pvtm_table[j-1].frequency);
1159                         break;
1160                 }
1161         }
1162 }
1163
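/*
 * Virtual temperature limiting (rk312x only): when no valid temperature is
 * available, estimate the number of busy CPUs from the per-CPU idle time
 * deltas and pick a frequency limit from the matching virt_temp_limit_table;
 * the estimate is skipped while the system is in performance status.
 */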
1164 static void dvfs_virt_temp_limit_work_func(struct dvfs_node *dvfs_node)
1165 {
1166         const struct cpufreq_frequency_table *limits_table = NULL;
1167         unsigned int new_temp_limit_rate = -1;
1168         unsigned int nr_cpus = num_online_cpus();
1169         static bool in_perf;
1170         int i;
1171
1172         if (!cpu_is_rk312x())
1173                 return;
1174
1175         if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
1176                 in_perf = true;
1177         } else if (in_perf) {
1178                 in_perf = false;
1179         } else {
1180                 static u64 last_time_in_idle;
1181                 static u64 last_time_in_idle_timestamp;
1182                 u64 time_in_idle = 0, now;
1183                 u32 delta_idle;
1184                 u32 delta_time;
1185                 unsigned cpu, busy_cpus;
1186
1187                 for_each_online_cpu(cpu) {
1188                         time_in_idle += get_cpu_idle_time_us(cpu, &now);
1189                 }
1190                 delta_time = now - last_time_in_idle_timestamp;
1191                 delta_idle = time_in_idle - last_time_in_idle;
1192                 last_time_in_idle = time_in_idle;
1193                 last_time_in_idle_timestamp = now;
1194                 delta_idle += delta_time >> 4; /* +6.25% */
1195                 if (delta_idle > (nr_cpus - 1)
1196                     * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
1197                         busy_cpus = 1;
1198                 else if (delta_idle > (nr_cpus - 2) * delta_time)
1199                         busy_cpus = 2;
1200                 else if (delta_idle > (nr_cpus - 3) * delta_time)
1201                         busy_cpus = 3;
1202                 else
1203                         busy_cpus = 4;
1204
1205                 limits_table = dvfs_node->virt_temp_limit_table[busy_cpus-1];
1206                 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
1207                          delta_time, delta_idle, nr_cpus, busy_cpus);
1208         }
1209
1210         if (limits_table) {
1211                 new_temp_limit_rate = limits_table[0].frequency;
1212                 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1213                         if (dvfs_node->target_temp >=
1214                                 limits_table[i].index)
1215                                 new_temp_limit_rate = limits_table[i].frequency;
1216                 }
1217         }
1218
1219         if (dvfs_node->temp_limit_rate != new_temp_limit_rate) {
1220                 dvfs_node->temp_limit_rate = new_temp_limit_rate;
1221                 dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
1222                 DVFS_DBG("temp_limit_rate:%d\n",
1223                          (int)dvfs_node->temp_limit_rate);
1224         }
1225 }
1226
1227 static void dvfs_temp_limit_performance(struct dvfs_node *dvfs_node, int temp)
1228 {
1229         int i;
1230
1231         dvfs_node->temp_limit_rate = dvfs_node->max_rate;
1232         for (i = 0; dvfs_node->per_temp_limit_table[i].frequency !=
1233                 CPUFREQ_TABLE_END; i++) {
1234                 if (temp > dvfs_node->per_temp_limit_table[i].index)
1235                         dvfs_node->temp_limit_rate =
1236                         dvfs_node->per_temp_limit_table[i].frequency;
1237         }
1238         dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
1239 }
1240
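/*
 * Normal-policy throttling is incremental: when the temperature exceeds the
 * target and is still rising, lower the rate limit by a step looked up from
 * nor_temp_limit_table by the amount of overshoot; once back below the
 * target, raise the limit again in steps until max_rate is restored.
 */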
1241 static void dvfs_temp_limit_normal(struct dvfs_node *dvfs_node, int temp)
1242 {
1243         int delta_temp = 0;
1244         unsigned long arm_rate_step = 0;
1245         int i;
1246
1247         if (temp > dvfs_node->target_temp) {
1248                 if (temp > dvfs_node->old_temp) {
1249                         delta_temp = temp - dvfs_node->target_temp;
1250                         for (i = 0;
1251                         dvfs_node->nor_temp_limit_table[i].frequency !=
1252                                 CPUFREQ_TABLE_END; i++) {
1253                                 if (delta_temp >
1254                                 dvfs_node->nor_temp_limit_table[i].index)
1255                                         arm_rate_step =
1256                                 dvfs_node->nor_temp_limit_table[i].frequency;
1257                         }
1258                         if (arm_rate_step &&
1259                             (dvfs_node->temp_limit_rate > arm_rate_step)) {
1260                                 dvfs_node->temp_limit_rate -= arm_rate_step;
1261                                 if (dvfs_node->temp_limit_rate <
1262                                         dvfs_node->min_temp_limit)
1263                                         dvfs_node->temp_limit_rate =
1264                                         dvfs_node->min_temp_limit;
1265                                 dvfs_clk_set_rate(dvfs_node,
1266                                                   dvfs_node->last_set_rate);
1267                         }
1268                 }
1269         } else {
1270                 if (dvfs_node->temp_limit_rate < dvfs_node->max_rate) {
1271                         delta_temp = dvfs_node->target_temp - temp;
1272                         for (i = 0;
1273                         dvfs_node->nor_temp_limit_table[i].frequency !=
1274                                 CPUFREQ_TABLE_END; i++) {
1275                                 if (delta_temp >
1276                                 dvfs_node->nor_temp_limit_table[i].index)
1277                                         arm_rate_step =
1278                                 dvfs_node->nor_temp_limit_table[i].frequency;
1279                         }
1280
1281                         if (arm_rate_step) {
1282                                 dvfs_node->temp_limit_rate += arm_rate_step;
1283                                 if (dvfs_node->temp_limit_rate >
1284                                         dvfs_node->max_rate)
1285                                         dvfs_node->temp_limit_rate =
1286                                         dvfs_node->max_rate;
1287                                 dvfs_clk_set_rate(dvfs_node,
1288                                                   dvfs_node->last_set_rate);
1289                         }
1290                 }
1291         }
1292 }
1293
1294 static void dvfs_temp_limit(struct dvfs_node *dvfs_node, int temp)
1295 {
1296         int delta_temp = 0;
1297
1298         //debounce
1299         delta_temp = (dvfs_node->old_temp > temp) ? (dvfs_node->old_temp-temp) :
1300         (temp-dvfs_node->old_temp);
1301         if (delta_temp <= 1)
1302                 return;
1303
1304         if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
1305                 if (!dvfs_node->per_temp_limit_table)
1306                         return;
1307                 dvfs_temp_limit_performance(dvfs_node, temp);
1308         } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
1309                 if (!dvfs_node->nor_temp_limit_table)
1310                         return;
1311                 dvfs_temp_limit_normal(dvfs_node, temp);
1312         }
1313         dvfs_node->old_temp = temp;
1314         DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n",
1315                  temp, dvfs_node->temp_limit_rate);
1316 }
1317
1318 static void dvfs_temp_limit_work_func(struct work_struct *work)
1319 {
1320         unsigned long delay = HZ/10;
1321         int temp = INVALID_TEMP;
1322
1323         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
1324
1325         mutex_lock(&temp_limit_mutex);
1326         if (clk_cpu_b_dvfs_node &&
1327             clk_cpu_b_dvfs_node->temp_limit_enable == 1) {
1328                 temp = dvfs_get_temp(0);
1329                 if (temp != INVALID_TEMP)
1330                         dvfs_temp_limit(clk_cpu_b_dvfs_node, temp);
1331         }
1332         if (clk_cpu_l_dvfs_node &&
1333             clk_cpu_l_dvfs_node->temp_limit_enable == 1) {
1334                 if (temp == INVALID_TEMP)
1335                         temp = dvfs_get_temp(0);
1336                 if (temp != INVALID_TEMP)
1337                         dvfs_temp_limit(clk_cpu_l_dvfs_node, temp);
1338         }
1339         if (clk_cpu_dvfs_node &&
1340             clk_cpu_dvfs_node->temp_limit_enable == 1) {
1341                 temp = dvfs_get_temp(1);
1342                 if (temp == INVALID_TEMP)
1343                         dvfs_virt_temp_limit_work_func(clk_cpu_dvfs_node);
1344                 else
1345                         dvfs_temp_limit(clk_cpu_dvfs_node, temp);
1346         }
1347         if (clk_gpu_dvfs_node &&
1348             clk_gpu_dvfs_node->temp_limit_enable == 1) {
1349                 temp = dvfs_get_temp(2);
1350                 if (temp != INVALID_TEMP)
1351                         dvfs_temp_limit(clk_gpu_dvfs_node, temp);
1352         }
1353         mutex_unlock(&temp_limit_mutex);
1354 }
1355 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
1356
1357 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1358 {
1359         u32 rate = 0, ret = 0;
1360
1361         if (!clk_dvfs_node || (min_rate > max_rate))
1362                 return -EINVAL;
1363         
1364         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1365                 mutex_lock(&clk_dvfs_node->vd->mutex);
1366                 
1367                 /* To reset clk_dvfs_node->min_rate/max_rate */
1368                 dvfs_get_rate_range(clk_dvfs_node);
1369                 clk_dvfs_node->freq_limit_en = 1;
1370
1371                 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1372                         clk_dvfs_node->min_rate = min_rate;
1373                 }
1374                 
1375                 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1376                         clk_dvfs_node->max_rate = max_rate;
1377                 }
1378
1379                 if (clk_dvfs_node->last_set_rate == 0)
1380                         rate = __clk_get_rate(clk_dvfs_node->clk);
1381                 else
1382                         rate = clk_dvfs_node->last_set_rate;
1383                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1384
1385                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1386
1387         }
1388
1389         DVFS_DBG("%s:clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1390                  __func__, __clk_get_name(clk_dvfs_node->clk),
1391                  clk_dvfs_node->last_set_rate,
1392                  clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1393
1394         return 0;
1395 }
1396 EXPORT_SYMBOL(dvfs_clk_enable_limit);
1397
1398 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1399 {
1400         u32 ret = 0;
1401
1402         if (!clk_dvfs_node)
1403                 return -EINVAL;
1404         
1405         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1406                 mutex_lock(&clk_dvfs_node->vd->mutex);
1407                 
1408                 /* To reset clk_dvfs_node->min_rate/max_rate */
1409                 dvfs_get_rate_range(clk_dvfs_node);
1410                 clk_dvfs_node->freq_limit_en = 0;
1411                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1412
1413                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1414         }
1415
1416         DVFS_DBG("%s: clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1417                  __func__, __clk_get_name(clk_dvfs_node->clk),
1418                  clk_dvfs_node->last_set_rate,
1419                  clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1420
1421         return 0;
1422 }
1423 EXPORT_SYMBOL(dvfs_clk_disable_limit);
1424
1425 void dvfs_disable_temp_limit(void) {
1426         if (clk_cpu_b_dvfs_node)
1427                 clk_cpu_b_dvfs_node->temp_limit_enable = 0;
1428         if (clk_cpu_l_dvfs_node)
1429                 clk_cpu_l_dvfs_node->temp_limit_enable = 0;
1430         if (clk_cpu_dvfs_node)
1431                 clk_cpu_dvfs_node->temp_limit_enable = 0;
1432         if (clk_gpu_dvfs_node)
1433                 clk_gpu_dvfs_node->temp_limit_enable = 0;
1434         cancel_delayed_work_sync(&dvfs_temp_limit_work);
1435 }
1436
1437 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate) 
1438 {
1439         int freq_limit_en;
1440
1441         if (!clk_dvfs_node)
1442                 return -EINVAL;
1443
1444         mutex_lock(&clk_dvfs_node->vd->mutex);
1445
1446         *min_rate = clk_dvfs_node->min_rate;
1447         *max_rate = clk_dvfs_node->max_rate;
1448         freq_limit_en = clk_dvfs_node->freq_limit_en;
1449
1450         mutex_unlock(&clk_dvfs_node->vd->mutex);
1451
1452         return freq_limit_en;
1453 }
1454 EXPORT_SYMBOL(dvfs_clk_get_limit);
1455
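/**
 * dvfs_clk_register_set_rate_callback - install a per-clock set-rate hook
 * @clk_dvfs_node: dvfs node of the clock
 * @clk_dvfs_target: callback used by dvfs_target() instead of clk_set_rate()
 *
 * Lets a caller provide its own rate-switch routine while the dvfs core
 * still handles the voltage sequencing around it.
 */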
1456 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1457 {
1458         if (!clk_dvfs_node)
1459                 return -EINVAL;
1460                         
1461         mutex_lock(&clk_dvfs_node->vd->mutex);
1462         clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1463         mutex_unlock(&clk_dvfs_node->vd->mutex);
1464
1465         return 0;
1466 }
1467 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
1468
1469 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node) 
1470 {
1471         struct cpufreq_frequency_table *table;
1472
1473         if (!clk_dvfs_node)
1474                 return NULL;
1475
1476         mutex_lock(&clk_dvfs_node->vd->mutex);
1477         table = clk_dvfs_node->dvfs_table;
1478         mutex_unlock(&clk_dvfs_node->vd->mutex);
1479         
1480         return table;
1481 }
1482 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
1483
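/**
 * dvfs_set_freq_volt_table - install a new frequency/voltage table
 * @clk_dvfs_node: dvfs node of the clock
 * @table: cpufreq-style table; .frequency holds the rate (kHz), .index the voltage
 *
 * Replaces the node's dvfs table, refreshes its min/max rate range and rounds
 * the table entries to rates and voltages the clock and regulator can
 * actually provide.
 */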
1484 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1485 {
1486         if (!clk_dvfs_node)
1487                 return -EINVAL;
1488
1489         if (IS_ERR_OR_NULL(table)){
1490                 DVFS_ERR("%s:invalid table!\n", __func__);
1491                 return -EINVAL;
1492         }
1493         
1494         mutex_lock(&clk_dvfs_node->vd->mutex);
1495         clk_dvfs_node->dvfs_table = table;
1496         dvfs_get_rate_range(clk_dvfs_node);
1497         dvfs_table_round_clk_rate(clk_dvfs_node);
1498         dvfs_table_round_volt(clk_dvfs_node);
1499         mutex_unlock(&clk_dvfs_node->vd->mutex);
1500
1501         return 0;
1502 }
1503 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
1504
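/*
 * Compute a voltage delta for this SoC based on its measured leakage.  The
 * chip leakage (rockchip_get_leakage()) is compared against the leakage the
 * default table was characterised for (def_table_lkg); the lkg_info table
 * then yields the delta: +dlt_volt when the chip leaks no more than the
 * reference, -dlt_volt when it leaks more.  Returns 0 when no adjustment
 * applies (no vd, no reference leakage, or an invalid leakage read-out).
 */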
1505 static int get_adjust_volt_by_leakage(struct dvfs_node *dvfs_node)
1506 {
1507         int leakage = 0;
1508         int delta_leakage = 0;
1509         int i = 0;
1510         int adjust_volt = 0;
1511
1512         if (!dvfs_node->vd)
1513                 return 0;
1514
1515         if (dvfs_node->lkg_info.def_table_lkg == -1)
1516                 return 0;
1517
1518         leakage = rockchip_get_leakage(dvfs_node->channel);
1519         if (!leakage || (leakage == 0xff))
1520                 return 0;
1521
1522         delta_leakage = leakage - dvfs_node->lkg_info.def_table_lkg;
1523         if (delta_leakage <= 0) {
1524                 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1525                         CPUFREQ_TABLE_END); i++) {
1526                         if (leakage > dvfs_node->lkg_info.table[i].lkg) {
1527                                 adjust_volt =
1528                                         dvfs_node->lkg_info.table[i].dlt_volt;
1529                         } else {
1530                                 return adjust_volt;
1531                         }
1532                 }
1533         } else if (delta_leakage > 0) {
1534                 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1535                         CPUFREQ_TABLE_END); i++) {
1536                         if (leakage <= dvfs_node->lkg_info.table[i].lkg) {
1537                                 adjust_volt =
1538                                         -dvfs_node->lkg_info.table[i].dlt_volt;
1539                                 return adjust_volt;
1540                         }
1541                 }
1542         }
1543         return adjust_volt;
1544 }
1545
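/*
 * Apply the leakage-derived voltage delta to every dvfs table entry at or
 * above lkg_info.min_adjust_freq; entries below that frequency keep their
 * default voltage.
 */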
1546 static void adjust_table_by_leakage(struct dvfs_node *dvfs_node)
1547 {
1548         int i, adjust_volt = get_adjust_volt_by_leakage(dvfs_node);
1549
1550         if (!adjust_volt)
1551                 return;
1552
1553         if (!dvfs_node->dvfs_table)
1554                 return;
1555
1556         if (dvfs_node->lkg_info.min_adjust_freq == -1)
1557                 return;
1558
1559         for (i = 0;
1560         (dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1561                 if (dvfs_node->dvfs_table[i].frequency >=
1562                         dvfs_node->lkg_info.min_adjust_freq)
1563                         dvfs_node->dvfs_table[i].index += adjust_volt;
1564         }
1565 }
1566
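/**
 * clk_enable_dvfs - bind a clock to its voltage domain and start dvfs for it
 * @clk_dvfs_node: dvfs node of the clock
 *
 * On the first enable this claims the voltage-domain regulator, rounds the
 * dvfs table to supported clock rates and regulator steps, applies optional
 * leakage and PVTM adjustments, reads the current clock rate and raises the
 * supply to the voltage that rate requires.  Later calls only increment the
 * enable count.  When regu_mode_en is set the regulator operating mode is
 * also constrained and programmed for the current rate.
 */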
1567 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1568 {
1569         struct cpufreq_frequency_table clk_fv;
1570         int volt_new;
1571         unsigned int mode;
1572         int ret;
1573
1574         if (!clk_dvfs_node)
1575                 return -EINVAL;
1576         
1577         DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n", 
1578                 __func__, __clk_get_name(clk_dvfs_node->clk));
1579
1580         if (!clk_dvfs_node->vd) {
1581                 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n", 
1582                         __func__, clk_dvfs_node->name);
1583                 return -EINVAL;
1584         }
1585         mutex_lock(&clk_dvfs_node->vd->mutex);
1586         if (clk_dvfs_node->enable_count == 0) {
1587                 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1588                         if (clk_dvfs_node->vd->regulator_name)
1589                                 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1590                         if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1591                                 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1592                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1593                                 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1594                                 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1595                                 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1596                         } else {
1597                                 clk_dvfs_node->vd->regulator = NULL;
1598                                 clk_dvfs_node->enable_count = 0;
1599                                 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n", 
1600                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1601                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1602                                 return -ENXIO;
1603                         }
1604                 } else {
1605                         clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1606                 }
1607                 
1608                 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1609                         __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
1610
1611                 dvfs_table_round_clk_rate(clk_dvfs_node);
1612                 dvfs_get_rate_range(clk_dvfs_node);
1613                 clk_dvfs_node->freq_limit_en = 1;
1614                 clk_dvfs_node->max_limit_freq = clk_dvfs_node->max_rate;
1615                 if (clk_dvfs_node->lkg_adjust_volt_en)
1616                         adjust_table_by_leakage(clk_dvfs_node);
1617                 if (clk_dvfs_node->support_pvtm)
1618                         pvtm_set_dvfs_table(clk_dvfs_node);
1619                 dvfs_table_round_volt(clk_dvfs_node);
1620                 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1621                 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1622                 
1623                 DVFS_DBG("%s: %s get freq %u!\n", 
1624                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1625
1626                 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1627                         if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1628                                 DVFS_ERR("%s: table empty\n", __func__);
1629                                 clk_dvfs_node->enable_count = 0;
1630                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1631                                 return -EINVAL;
1632                         } else {
1633                                 DVFS_WARNING("%s: clk(%s) all freq table values are smaller than the default(%d), keep default and just enable dvfs\n",
1634                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1635                                 clk_dvfs_node->enable_count++;
1636                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1637                                 return 0;
1638                         }
1639                 }
1640                 clk_dvfs_node->enable_count++;
1641                 clk_dvfs_node->set_volt = clk_fv.index;
1642                 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1643                 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1644                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1645 #if 0
1646                 if (clk_dvfs_node->dvfs_nb) {
1647                         // must unregister when clk disable
1648                         clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
1649                 }
1650 #endif
1651                 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1652                         ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1653                         dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1654                         if (ret < 0) {
1655                                 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1656                                 clk_dvfs_node->enable_count = 0;
1657                                 DVFS_ERR("dvfs enable clk %s, set volt error\n", clk_dvfs_node->name);
1658                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1659                                 return -EAGAIN;
1660                         }
1661                         clk_dvfs_node->vd->cur_volt = volt_new;
1662                         clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1663                 }
1664
1665         } else {
1666                 DVFS_DBG("%s: dvfs already enabled, enable count = %d!\n",
1667                         __func__, clk_dvfs_node->enable_count);
1668                 clk_dvfs_node->enable_count++;
1669         }
1670
1671         if (clk_dvfs_node->regu_mode_en) {
1672                 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1673                 if (ret) {
1674                         DVFS_ERR("%s: clk(%s) regu_mode_table is invalid, set regu_mode_en=0!\n",
1675                                         __func__, clk_dvfs_node->name);
1676                         clk_dvfs_node->regu_mode_en = 0;
1677                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1678                         return ret;
1679                 }
1680
1681                 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1682                 if (ret < 0) {
1683                         DVFS_ERR("%s: clk(%s) rate %d KHz: failed to get regu_mode\n",
1684                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1685                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1686                         return ret;
1687                 } else
1688                         clk_dvfs_node->regu_mode = mode;
1689
1690                 dvfs_update_clk_pds_mode(clk_dvfs_node);
1691         }
1692
1693         mutex_unlock(&clk_dvfs_node->vd->mutex);
1694         
1695         return 0;
1696 }
1697 EXPORT_SYMBOL(clk_enable_dvfs);
1698
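/**
 * clk_disable_dvfs - drop one dvfs reference on a clock
 * @clk_dvfs_node: dvfs node of the clock
 *
 * When the enable count reaches zero the voltage requirement of the domain
 * is recomputed without this clock and the supply is scaled down directly.
 */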
1699 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1700 {
1701         int volt_new;
1702
1703         if (!clk_dvfs_node)
1704                 return -EINVAL;
1705
1706         DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n", 
1707                 __func__, __clk_get_name(clk_dvfs_node->clk));
1708
1709         mutex_lock(&clk_dvfs_node->vd->mutex);
1710         if (!clk_dvfs_node->enable_count) {
1711                 DVFS_WARNING("%s:clk(%s) is already closed!\n", 
1712                         __func__, __clk_get_name(clk_dvfs_node->clk));
1713                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1714                 return 0;
1715         } else {
1716                 clk_dvfs_node->enable_count--;
1717                 if (0 == clk_dvfs_node->enable_count) {
1718                         DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1719                                 __func__, __clk_get_name(clk_dvfs_node->clk));
1720                         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1721                         dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1722
1723 #if 0
1724                         clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1725                         DVFS_DBG("clk unregister nb!\n");
1726 #endif
1727                 }
1728         }
1729         mutex_unlock(&clk_dvfs_node->vd->mutex);
1730         return 0;
1731 }
1732 EXPORT_SYMBOL(clk_disable_dvfs);
1733
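/*
 * Clamp a requested rate (Hz) to the node's allowed window: first to
 * [min_rate, max_rate] from the dvfs table, then to the thermal limit when
 * temperature limiting is active, and finally to max_limit_freq.  With
 * freq_limit_en cleared the rate is passed through unchanged.
 */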
1734 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1735 {
1736         unsigned long limit_rate;
1737
1738         limit_rate = rate;
1739         if (clk_dvfs_node->freq_limit_en) {
1740                 /* dvfs table limit */
1741                 if (rate < clk_dvfs_node->min_rate) {
1742                         limit_rate = clk_dvfs_node->min_rate;
1743                 } else if (rate > clk_dvfs_node->max_rate) {
1744                         limit_rate = clk_dvfs_node->max_rate;
1745                 }
1746                 if (clk_dvfs_node->temp_limit_enable) {
1747                         if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1748                                 limit_rate = clk_dvfs_node->temp_limit_rate;
1749                         }
1750                 }
1751                 if (limit_rate > clk_dvfs_node->max_limit_freq)
1752                         limit_rate = clk_dvfs_node->max_limit_freq;
1753         }
1754
1755         DVFS_DBG("%s: rate:%lu, limit_rate:%lu\n", __func__, rate, limit_rate);
1756
1757         return limit_rate;
1758 }
1759
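/*
 * dvfs_target() is the vd_dvfs_target implementation installed by
 * of_dvfs_init() and runs with the voltage-domain mutex held.  Sequence:
 * clamp the requested rate, round it to a supported clock rate, look up the
 * required voltage, raise the supply before increasing the rate, switch the
 * rate (per-clock callback if registered, otherwise clk_set_rate()) and
 * lower the supply after decreasing the rate.  On failure the previous
 * voltage request is restored in the bookkeeping (set_volt).
 */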
1760 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1761 {
1762         struct cpufreq_frequency_table clk_fv;
1763         unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1764         struct clk *clk = clk_dvfs_node->clk;
1765         int ret;
1766
1767         if (!clk)
1768                 return -EINVAL;
1769
1770         if (!clk_dvfs_node->enable_count) {
1771                 DVFS_ERR("%s: %s is disable, set rate error\n",
1772                          __func__, clk_dvfs_node->name);
1773                 return 0;
1774         }
1775         
1776         if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1777                 /* It means the last time set voltage error */
1778                 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1779                 if (ret < 0) {
1780                         return -EAGAIN;
1781                 }
1782         }
1783
1784         rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1785         new_rate = __clk_round_rate(clk, rate);
1786         old_rate = __clk_get_rate(clk);
1787         if (new_rate == old_rate)
1788                 return 0;
1789
1790         DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1791                 __func__, clk_dvfs_node->name, new_rate, old_rate);
1792
1793         /* find the clk corresponding voltage */
1794         ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1795         if (ret) {
1796                 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not support\n",
1797                         __func__, clk_dvfs_node->name, new_rate);
1798                 return ret;
1799         }
1800         clk_volt_store = clk_dvfs_node->set_volt;
1801         clk_dvfs_node->set_volt = clk_fv.index;
1802         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1803         DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1804                 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1805
1806
1807         /* if up the rate */
1808         if (new_rate > old_rate) {
1809                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1810                 if (ret)
1811                         DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1812                                 __func__, clk_dvfs_node->name, new_rate);
1813
1814                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1815                 if (ret)
1816                         goto fail_roll_back;
1817         }
1818
1819         /* scale rate */
1820         if (clk_dvfs_node->clk_dvfs_target) {
1821                 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1822         } else {
1823                 ret = clk_set_rate(clk, rate);
1824         }
1825
1826         if (ret) {
1827                 DVFS_ERR("%s:clk(%s) set rate err\n", 
1828                         __func__, __clk_get_name(clk));
1829                 goto fail_roll_back;
1830         }
1831         clk_dvfs_node->set_freq = new_rate / 1000;
1832
1833         DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n", 
1834                 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1835
1836         /* if down the rate */
1837         if (new_rate < old_rate) {
1838                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1839                 if (ret)
1840                         goto out;
1841
1842                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1843                 if (ret)
1844                         DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1845                         __func__, clk_dvfs_node->name, new_rate);
1846         }
1847
1848         return 0;
1849 fail_roll_back:
1850         clk_dvfs_node->set_volt = clk_volt_store;
1851 out:
1852         return ret;
1853 }
1854
1855 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1856 {
1857         return __clk_round_rate(clk_dvfs_node->clk, rate);
1858 }
1859 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1860
1861 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1862 {
1863         return __clk_get_rate(clk_dvfs_node->clk);
1864 }
1865 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1866
1867 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1868 {
1869         unsigned long last_set_rate;
1870
1871         mutex_lock(&clk_dvfs_node->vd->mutex);
1872         last_set_rate = clk_dvfs_node->last_set_rate;
1873         mutex_unlock(&clk_dvfs_node->vd->mutex);
1874
1875         return last_set_rate;
1876 }
1877 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1878
1879
1880 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1881 {
1882         return clk_enable(clk_dvfs_node->clk);
1883 }
1884 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1885
1886 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1887 {
1888         return clk_disable(clk_dvfs_node->clk);
1889 }
1890 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1891
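/**
 * clk_get_dvfs_node - look up a dvfs node by clock name
 * @clk_name: name of the clock as registered in the dvfs device tree
 *
 * Walks the vd -> pd -> clk tree and returns the matching node, or NULL if
 * the name is unknown.  Illustrative use by a consumer (sketch only, not
 * taken from this file):
 *
 *	struct dvfs_node *gpu = clk_get_dvfs_node("clk_gpu");
 *
 *	if (gpu) {
 *		clk_enable_dvfs(gpu);
 *		dvfs_clk_prepare_enable(gpu);
 *		dvfs_clk_set_rate(gpu, 400000000);	/* 400 MHz, example value */
 *	}
 */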
1892 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1893 {
1894         struct vd_node *vd;
1895         struct pd_node *pd;
1896         struct dvfs_node *clk_dvfs_node;
1897
1898         mutex_lock(&rk_dvfs_mutex);
1899         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1900                 mutex_lock(&vd->mutex);
1901                 list_for_each_entry(pd, &vd->pd_list, node) {
1902                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1903                                 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
1904                                         mutex_unlock(&vd->mutex);
1905                                         mutex_unlock(&rk_dvfs_mutex);
1906                                         return clk_dvfs_node;
1907                                 }
1908                         }
1909                 }
1910                 mutex_unlock(&vd->mutex);
1911         }
1912         mutex_unlock(&rk_dvfs_mutex);
1913         
1914         return NULL;    
1915 }
1916 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
1917
1918 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1919 {
1920         return;
1921 }
1922 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1923
1924 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1925 {
1926         return clk_prepare_enable(clk_dvfs_node->clk);
1927 }
1928 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1929
1930
1931 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1932 {
1933         clk_disable_unprepare(clk_dvfs_node->clk);
1934 }
1935 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
1936
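/**
 * dvfs_clk_set_rate - request a new rate with coordinated voltage scaling
 * @clk_dvfs_node: dvfs node of the clock
 * @rate: requested rate in Hz
 *
 * Takes the voltage-domain mutex, runs the domain's dvfs target (normally
 * dvfs_target()) and records the request in last_set_rate so frequency
 * limits can later be re-applied against it.
 */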
1937 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1938 {
1939         int ret = -EINVAL;
1940         
1941         if (!clk_dvfs_node)
1942                 return -EINVAL;
1943         
1944         DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n", 
1945                 __func__, clk_dvfs_node->name, rate);
1946         
1947         #if 0 // judge by reference func in rk
1948         if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1949                 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1950                 return ret;
1951         }
1952         #endif
1953
1954         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1955                 mutex_lock(&clk_dvfs_node->vd->mutex);
1956                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1957                 clk_dvfs_node->last_set_rate = rate;
1958                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1959         } else {
1960                 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n", 
1961                         __func__, clk_dvfs_node->name);
1962         }
1963                 
1964         return ret;     
1965 }
1966 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
1967
1968
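/*
 * rk_regist_vd()/rk_regist_pd()/rk_regist_clk() link the three node types
 * into the global rk_dvfs_tree hierarchy: voltage domains at the top level,
 * power domains under their voltage domain, and clocks under their power
 * domain.  of_dvfs_init() below builds the whole tree from the device tree
 * using these helpers.
 */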
1969 int rk_regist_vd(struct vd_node *vd)
1970 {
1971         if (!vd)
1972                 return -EINVAL;
1973
1974         vd->mode_flag = 0;
1975         vd->volt_time_flag = 0;
1976         vd->n_voltages = 0;
1977         INIT_LIST_HEAD(&vd->pd_list);
1978         mutex_lock(&rk_dvfs_mutex);
1979         list_add(&vd->node, &rk_dvfs_tree);
1980         mutex_unlock(&rk_dvfs_mutex);
1981
1982         return 0;
1983 }
1984 EXPORT_SYMBOL_GPL(rk_regist_vd);
1985
1986 int rk_regist_pd(struct pd_node *pd)
1987 {
1988         struct vd_node  *vd;
1989
1990         if (!pd)
1991                 return -EINVAL;
1992
1993         vd = pd->vd;
1994         if (!vd)
1995                 return -EINVAL;
1996
1997         INIT_LIST_HEAD(&pd->clk_list);
1998         mutex_lock(&vd->mutex);
1999         list_add(&pd->node, &vd->pd_list);
2000         mutex_unlock(&vd->mutex);
2001         
2002         return 0;
2003 }
2004 EXPORT_SYMBOL_GPL(rk_regist_pd);
2005
2006 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
2007 {
2008         struct vd_node  *vd;
2009         struct pd_node  *pd;
2010
2011         if (!clk_dvfs_node)
2012                 return -EINVAL;
2013
2014         vd = clk_dvfs_node->vd;
2015         pd = clk_dvfs_node->pd;
2016         if (!vd || !pd)
2017                 return -EINVAL;
2018
2019         mutex_lock(&vd->mutex);
2020         list_add(&clk_dvfs_node->node, &pd->clk_list);
2021         mutex_unlock(&vd->mutex);
2022         
2023         return 0;
2024 }
2025 EXPORT_SYMBOL_GPL(rk_regist_clk);
2026
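/*
 * Parse a temperature-limit property consisting of <temperature rate> pairs.
 * Each entry's .index holds the temperature threshold and .frequency the
 * rate limit (the DT value multiplied by 1000); the list is terminated with
 * CPUFREQ_TABLE_END.
 */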
2027 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
2028 {
2029         struct cpufreq_frequency_table *temp_limit_table = NULL;
2030         const struct property *prop;
2031         const __be32 *val;
2032         int nr, i;
2033
2034         prop = of_find_property(dev_node, propname, NULL);
2035         if (!prop)
2036                 return NULL;
2037         if (!prop->value)
2038                 return NULL;
2039
2040         nr = prop->length / sizeof(u32);
2041         if (nr % 2) {
2042                 pr_err("%s: Invalid freq list\n", __func__);
2043                 return NULL;
2044         }
2045
2046         temp_limit_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
2047                              (nr/2 + 1), GFP_KERNEL);
             if (!temp_limit_table)
                     return NULL;
2048
2049         val = prop->value;
2050
2051         for (i = 0; i < nr/2; i++) {
2052                 temp_limit_table[i].index = be32_to_cpup(val++);
2053                 temp_limit_table[i].frequency = be32_to_cpup(val++) * 1000;
2054         }
2055
2056         temp_limit_table[i].index = 0;
2057         temp_limit_table[i].frequency = CPUFREQ_TABLE_END;
2058
2059         return temp_limit_table;
2060
2061 }
2062
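/*
 * Parse the "operating-points" property, a list of <frequency voltage>
 * pairs, into a cpufreq-style table: .frequency holds the rate (kHz) and
 * .index the voltage, terminated with CPUFREQ_TABLE_END.
 */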
2063 static int of_get_dvfs_table(struct device_node *dev_node,
2064                              struct cpufreq_frequency_table **dvfs_table)
2065 {
2066         struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
2067         const struct property *prop;
2068         const __be32 *val;
2069         int nr, i;
2070
2071         prop = of_find_property(dev_node, "operating-points", NULL);
2072         if (!prop)
2073                 return -EINVAL;
2074         if (!prop->value)
2075                 return -EINVAL;
2076
2077         nr = prop->length / sizeof(u32);
2078         if (nr % 2) {
2079                 pr_err("%s: Invalid freq list\n", __func__);
2080                 return -EINVAL;
2081         }
2082
2083         tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
2084                              (nr/2 + 1), GFP_KERNEL);
             if (!tmp_dvfs_table)
                     return -ENOMEM;
2085         val = prop->value;
2086
2087         for (i = 0; i < nr/2; i++) {
2088                 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
2089                 tmp_dvfs_table[i].index = be32_to_cpup(val++);
2090         }
2091
2092         tmp_dvfs_table[i].index = 0;
2093         tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
2094
2095         *dvfs_table = tmp_dvfs_table;
2096
2097         return 0;
2098 }
2099
2100
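/*
 * Parse the "pvtm-operating-points" property, a list of
 * <frequency voltage pvtm> triplets, into two parallel tables: the normal
 * dvfs table (frequency/voltage) and a pvtm table holding the expected PVTM
 * value for each frequency.
 */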
2101 static int of_get_dvfs_pvtm_table(struct device_node *dev_node,
2102                                   struct cpufreq_frequency_table **dvfs_table,
2103                                   struct cpufreq_frequency_table **pvtm_table)
2104 {
2105         struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
2106         struct cpufreq_frequency_table *tmp_pvtm_table = NULL;
2107         const struct property *prop;
2108         const __be32 *val;
2109         int nr, i;
2110
2111         prop = of_find_property(dev_node, "pvtm-operating-points", NULL);
2112         if (!prop)
2113                 return -EINVAL;
2114         if (!prop->value)
2115                 return -EINVAL;
2116
2117         nr = prop->length / sizeof(u32);
2118         if (nr % 3) {
2119                 pr_err("%s: Invalid freq list\n", __func__);
2120                 return -EINVAL;
2121         }
2122
2123         tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
2124                              (nr/3 + 1), GFP_KERNEL);
             if (!tmp_dvfs_table)
                     return -ENOMEM;
2125
2126         tmp_pvtm_table = kzalloc(sizeof(*tmp_pvtm_table) *
2127                              (nr/3 + 1), GFP_KERNEL);
             if (!tmp_pvtm_table) {
                     kfree(tmp_dvfs_table);
                     return -ENOMEM;
             }
2128
2129         val = prop->value;
2130
2131         for (i = 0; i < nr/3; i++) {
2132                 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
2133                 tmp_dvfs_table[i].index = be32_to_cpup(val++);
2134
2135                 tmp_pvtm_table[i].frequency = tmp_dvfs_table[i].frequency;
2136                 tmp_pvtm_table[i].index = be32_to_cpup(val++);
2137         }
2138
2139         tmp_dvfs_table[i].index = 0;
2140         tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
2141
2142         tmp_pvtm_table[i].index = 0;
2143         tmp_pvtm_table[i].frequency = CPUFREQ_TABLE_END;
2144
2145         *dvfs_table = tmp_dvfs_table;
2146         *pvtm_table = tmp_pvtm_table;
2147
2148         return 0;
2149 }
2150
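/*
 * Parse a leakage-adjustment property consisting of <leakage delta_volt>
 * pairs, used by get_adjust_volt_by_leakage() to pick a voltage offset for
 * the measured chip leakage.
 */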
2151 static struct lkg_adjust_volt_table
2152         *of_get_lkg_adjust_volt_table(struct device_node *np,
2153         const char *propname)
2154 {
2155         struct lkg_adjust_volt_table *lkg_adjust_volt_table = NULL;
2156         const struct property *prop;
2157         const __be32 *val;
2158         int nr, i;
2159
2160         prop = of_find_property(np, propname, NULL);
2161         if (!prop)
2162                 return NULL;
2163         if (!prop->value)
2164                 return NULL;
2165
2166         nr = prop->length / sizeof(s32);
2167         if (nr % 2) {
2168                 pr_err("%s: Invalid freq list\n", __func__);
2169                 return NULL;
2170         }
2171
2172         lkg_adjust_volt_table =
2173                 kzalloc(sizeof(struct lkg_adjust_volt_table) *
2174                 (nr/2 + 1), GFP_KERNEL);
             if (!lkg_adjust_volt_table)
                     return NULL;
2175
2176         val = prop->value;
2177
2178         for (i = 0; i < nr/2; i++) {
2179                 lkg_adjust_volt_table[i].lkg = be32_to_cpup(val++);
2180                 lkg_adjust_volt_table[i].dlt_volt = be32_to_cpup(val++);
2181         }
2182
2183         lkg_adjust_volt_table[i].lkg = 0;
2184         lkg_adjust_volt_table[i].dlt_volt = CPUFREQ_TABLE_END;
2185
2186         return lkg_adjust_volt_table;
2187 }
2188
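/*
 * Fill a dvfs_node from its device-tree node: thermal channel, regulator
 * mode table, temperature-limit tables, cluster id, PVTM parameters (with
 * the matching entry from pvtm_info_table) and the leakage-based voltage
 * adjustment data.  The operating-point table itself comes from either
 * "pvtm-operating-points" or "operating-points".
 */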
2189 static int dvfs_node_parse_dt(struct device_node *np,
2190                               struct dvfs_node *dvfs_node)
2191 {
2192         int process_version = rockchip_process_version();
2193         int i = 0;
2194         int ret;
2195
2196         of_property_read_u32_index(np, "channel", 0, &dvfs_node->channel);
2197
2198         pr_info("channel:%d, lkg:%d\n",
2199                 dvfs_node->channel, rockchip_get_leakage(dvfs_node->channel));
2200
2201         of_property_read_u32_index(np, "regu-mode-en", 0,
2202                                    &dvfs_node->regu_mode_en);
2203         if (dvfs_node->regu_mode_en)
2204                 dvfs_node->regu_mode_table = of_get_regu_mode_table(np);
2205         else
2206                 dvfs_node->regu_mode_table = NULL;
2207
2208         of_property_read_u32_index(np, "temp-limit-enable", 0,
2209                                    &dvfs_node->temp_limit_enable);
2210         if (dvfs_node->temp_limit_enable) {
2211                 of_property_read_u32_index(np, "min_temp_limit",
2212                                            0, &dvfs_node->min_temp_limit);
2213                 dvfs_node->min_temp_limit *= 1000;
2214                 of_property_read_u32_index(np, "target-temp",
2215                                            0, &dvfs_node->target_temp);
2216                 pr_info("target-temp:%d\n", dvfs_node->target_temp);
2217                 dvfs_node->nor_temp_limit_table =
2218                         of_get_temp_limit_table(np,
2219                                                 "normal-temp-limit");
2220                 dvfs_node->per_temp_limit_table =
2221                         of_get_temp_limit_table(np,
2222                                                 "performance-temp-limit");
2223                 dvfs_node->virt_temp_limit_table[0] =
2224                         of_get_temp_limit_table(np,
2225                                                 "virt-temp-limit-1-cpu-busy");
2226                 dvfs_node->virt_temp_limit_table[1] =
2227                         of_get_temp_limit_table(np,
2228                                                 "virt-temp-limit-2-cpu-busy");
2229                 dvfs_node->virt_temp_limit_table[2] =
2230                         of_get_temp_limit_table(np,
2231                                                 "virt-temp-limit-3-cpu-busy");
2232                 dvfs_node->virt_temp_limit_table[3] =
2233                         of_get_temp_limit_table(np,
2234                                                 "virt-temp-limit-4-cpu-busy");
2235         }
2236         dvfs_node->temp_limit_rate = -1;
2237
2238         dvfs_node->cluster = 0;
2239         of_property_read_u32_index(np, "cluster", 0, &dvfs_node->cluster);
2240
2241         dvfs_node->pvtm_min_temp = 0;
2242         of_property_read_u32_index(np, "pvtm_min_temp", 0,
2243                                    &dvfs_node->pvtm_min_temp);
2244
2245         ret = of_property_read_u32_index(np, "support-pvtm", 0,
2246                                          &dvfs_node->support_pvtm);
2247         if (!ret) {
2248                 if (of_get_dvfs_pvtm_table(np, &dvfs_node->dvfs_table,
2249                                            &dvfs_node->pvtm_table))
2250                         return -EINVAL;
2251
2252                 for (i = 0; i < ARRAY_SIZE(pvtm_info_table); i++) {
2253                         struct pvtm_info *pvtm_info = pvtm_info_table[i];
2254
2255                         if ((pvtm_info->channel == dvfs_node->channel) &&
2256                             (pvtm_info->process_version == process_version) &&
2257                             (pvtm_info->cluster == dvfs_node->cluster) &&
2258                              of_machine_is_compatible(pvtm_info->compatible)) {
2259                                 dvfs_node->pvtm_info = pvtm_info;
2260                                 break;
2261                         }
2262                 }
2263
2264                 if (!dvfs_node->pvtm_info)
2265                         dvfs_node->support_pvtm = 0;
2266         } else {
2267                 if (of_get_dvfs_table(np, &dvfs_node->dvfs_table))
2268                         return -EINVAL;
2269         }
2270
2271         of_property_read_u32_index(np, "lkg_adjust_volt_en", 0,
2272                                    &dvfs_node->lkg_adjust_volt_en);
2273         if (dvfs_node->lkg_adjust_volt_en) {
2274                 dvfs_node->lkg_info.def_table_lkg = -1;
2275                 of_property_read_u32_index(np, "def_table_lkg", 0,
2276                                            &dvfs_node->lkg_info.def_table_lkg);
2277
2278                 dvfs_node->lkg_info.min_adjust_freq = -1;
2279                 of_property_read_u32_index(np, "min_adjust_freq", 0,
2280                                            &dvfs_node->lkg_info.min_adjust_freq
2281                                            );
2282
2283                 dvfs_node->lkg_info.table =
2284                         of_get_lkg_adjust_volt_table(np,
2285                                                      "lkg_adjust_volt_table");
2286         }
2287
2288         return 0;
2289 }
2290
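/*
 * Build the dvfs tree from the "dvfs" device-tree node: one vd_node per
 * voltage-domain child (bound to its regulator name and to dvfs_target()),
 * one pd_node per power-domain child and one dvfs_node per clock child,
 * each attached to the clock looked up by its node name.
 */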
2291 int of_dvfs_init(void)
2292 {
2293         struct vd_node *vd;
2294         struct pd_node *pd;
2295         struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
2296         struct dvfs_node *dvfs_node;
2297         struct clk *clk;
2298         int ret;
2299
2300         DVFS_DBG("%s\n", __func__);
2301         pr_info("process version: %d\n", rockchip_process_version());
2302
2303         dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
2304         if (!dvfs_dev_node) {
2305                 DVFS_ERR("%s: get dvfs dev node err\n", __func__);
2306                 return -ENODEV;
2307         }
2308
2309         for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
2310                 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
2311                 if (!vd)
2312                         return -ENOMEM;
2313
2314                 mutex_init(&vd->mutex);
2315                 vd->name = vd_dev_node->name;
2316                 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
2317                 if (ret) {
2318                         DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n", 
2319                                 __func__, vd_dev_node->name, ret);
2320                         kfree(vd);
2321                         continue;
2322                 }
2323                 
2324                 vd->suspend_volt = 0;
2325                 
2326                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
2327                 vd->vd_dvfs_target = dvfs_target;
2328                 ret = rk_regist_vd(vd);
2329                 if (ret){
2330                         DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
2331                         kfree(vd);
2332                         continue;
2333                 }
2334
2335                 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n", 
2336                         __func__, vd->name, vd->regulator_name, vd->suspend_volt);
2337                 
2338                 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {            
2339                         pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
2340                         if (!pd)
2341                                 return -ENOMEM;
2342
2343                         pd->vd = vd;
2344                         pd->name = pd_dev_node->name;
2345                         
2346                         ret = rk_regist_pd(pd);
2347                         if (ret){
2348                                 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
2349                                 kfree(pd);
2350                                 continue;
2351                         }
2352                         DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n", 
2353                                 __func__, pd->name, vd->name);                  
2354                         for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
2355                                 if (!of_device_is_available(clk_dev_node))
2356                                         continue;
2357                                 
2358                                 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
2359                                 if (!dvfs_node)
2360                                         return -ENOMEM;
2361                                 
2362                                 dvfs_node->name = clk_dev_node->name;
2363                                 dvfs_node->pd = pd;
2364                                 dvfs_node->vd = vd;
2365
2366                                 if (dvfs_node_parse_dt(clk_dev_node, dvfs_node)) {
2367                                         kfree(dvfs_node);
                                                continue;
                                        }
2368                                 
2369                                 clk = clk_get(NULL, clk_dev_node->name);
2370                                 if (IS_ERR(clk)){
2371                                         DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
2372                                         kfree(dvfs_node);
2373                                         continue;
2374                                         
2375                                 }
2376                                 
2377                                 dvfs_node->clk = clk;
2378                                 ret = rk_regist_clk(dvfs_node);
2379                                 if (ret){
2380                                         DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
2381                                         return ret;
2382                                 }
2383
2384                                 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n", 
2385                                         __func__, clk_dev_node->name, pd->name);        
2386
2387                         }
2388                 }       
2389         }
2390         return 0;
2391 }
2392
2393 #ifdef CONFIG_ARM64
2394 arch_initcall_sync(of_dvfs_init);
2395 #endif
2396
2397 /*********************************************************************************/
2398 /**
2399  * dump_dbg_map() - dump the whole dvfs tree (vd -> pd -> clk) for debugging
2400  *
 * Everything is printed to the kernel log via printk(); nothing is written
 * to @buf, so reading the dvfs_tree sysfs attribute returns an empty string.
 */
2401 static int dump_dbg_map(char *buf)
2402 {
2403         int i;
2404         struct vd_node  *vd;
2405         struct pd_node  *pd;
2406         struct dvfs_node        *clk_dvfs_node;
2407         char *s = buf;
2408         
2409         mutex_lock(&rk_dvfs_mutex);
2410         printk( "-------------DVFS TREE-----------\n\n\n");
2411         printk( "DVFS TREE:\n");
2412
2413         list_for_each_entry(vd, &rk_dvfs_tree, node) {
2414                 mutex_lock(&vd->mutex);
2415                 printk( "|\n|- voltage domain:%s\n", vd->name);
2416                 printk( "|- current voltage:%d\n", vd->cur_volt);
2417                 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
2418
2419                 list_for_each_entry(pd, &vd->pd_list, node) {
2420                         printk( "|  |\n|  |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
2421                                         pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
2422                                         dvfs_regu_mode_to_string(pd->regu_mode));
2423
2424                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
2425                                 printk( "|  |  |\n|  |  |- clock: %s current: rate %d, volt = %d,"
2426                                                 " enable_dvfs = %s\n",
2427                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
2428                                                 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
2429                                 printk( "|  |  |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
2430                                                 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
2431                                                 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
2432                                                 clk_dvfs_node->last_set_rate/1000);
2433                                 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2434                                         printk( "|  |  |  |- freq = %d, volt = %d\n",
2435                                                         clk_dvfs_node->dvfs_table[i].frequency,
2436                                                         clk_dvfs_node->dvfs_table[i].index);
2437
2438                                 }
2439                                 printk( "|  |  |- clock: %s current: rate %d, regu_mode = %s,"
2440                                                 " regu_mode_en = %d\n",
2441                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq,
2442                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
2443                                                 clk_dvfs_node->regu_mode_en);
2444                                 if (clk_dvfs_node->regu_mode_table) {
2445                                         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2446                                                 printk( "|  |  |  |- freq = %d, regu_mode = %s\n",
2447                                                                 clk_dvfs_node->regu_mode_table[i].frequency/1000,
2448                                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
2449                                         }
2450                                 }
2451                         }
2452                 }
2453                 mutex_unlock(&vd->mutex);
2454         }
2455         
2456         printk( "-------------DVFS TREE END------------\n");
2457         mutex_unlock(&rk_dvfs_mutex);
2458         
2459         return s - buf;
2460 }
2461
2462 /*********************************************************************************/
2463 static struct kobject *dvfs_kobj;
2464 struct dvfs_attribute {
2465         struct attribute        attr;
2466         ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
2467                         char *buf);
2468         ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
2469                         const char *buf, size_t n);
2470 };
2471
2472 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
2473                const char *buf, size_t n)
2474 {
2475        return n;
2476 }
2477 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
2478                char *buf)
2479 {
2480        return dump_dbg_map(buf);
2481 }
2482
2483 static ssize_t cpu_temp_target_store(struct kobject *kobj,
2484                                      struct kobj_attribute *attr,
2485                                      const char *buf, size_t n)
2486 {
2487         int ret = 0;
2488
2489         mutex_lock(&temp_limit_mutex);
2490         if (clk_cpu_b_dvfs_node) {
2491                 ret = kstrtouint(buf, 0, &clk_cpu_b_dvfs_node->target_temp);
2492                 if (ret < 0)
2493                         goto error;
2494         }
2495         if (clk_cpu_l_dvfs_node) {
2496                 ret = kstrtouint(buf, 0, &clk_cpu_l_dvfs_node->target_temp);
2497                 if (ret < 0)
2498                         goto error;
2499         }
2500         if (clk_cpu_dvfs_node) {
2501                 ret = kstrtouint(buf, 0, &clk_cpu_dvfs_node->target_temp);
2502                 if (ret < 0)
2503                         goto error;
2504         }
2505 error:
2506         mutex_unlock(&temp_limit_mutex);
2507         return n;
2508 }
2509 static ssize_t cpu_temp_target_show(struct kobject *kobj,
2510                                     struct kobj_attribute *attr, char *buf)
2511 {
2512         int ret = 0;
2513
2514         if (clk_cpu_b_dvfs_node)
2515                 ret += sprintf(buf + ret, "cpu_b:%d\n",
2516                 clk_cpu_b_dvfs_node->target_temp);
2517         if (clk_cpu_l_dvfs_node)
2518                 ret += sprintf(buf + ret, "cpu_l:%d\n",
2519                 clk_cpu_l_dvfs_node->target_temp);
2520         if (clk_cpu_dvfs_node)
2521                 ret += sprintf(buf + ret, "cpu:%d\n",
2522                 clk_cpu_dvfs_node->target_temp);
2523
2524         return ret;
2525 }
2526
2527 static ssize_t cpu_temp_enable_store(struct kobject *kobj,
2528                                      struct kobj_attribute *attr,
2529                                      const char *buf, size_t n)
2530 {
2531         int ret = 0;
2532
2533         mutex_lock(&temp_limit_mutex);
2534         if (clk_cpu_b_dvfs_node) {
2535                 ret = kstrtouint(buf, 0,
2536                                  &clk_cpu_b_dvfs_node->temp_limit_enable);
2537                 if (ret < 0)
2538                         goto error;
2539                 clk_cpu_b_dvfs_node->temp_limit_rate =
2540                         clk_cpu_b_dvfs_node->max_rate;
2541         }
2542         if (clk_cpu_l_dvfs_node) {
2543                 ret = kstrtouint(buf, 0,
2544                                  &clk_cpu_l_dvfs_node->temp_limit_enable);
2545                 if (ret < 0)
2546                         goto error;
2547                 clk_cpu_l_dvfs_node->temp_limit_rate =
2548                         clk_cpu_l_dvfs_node->max_rate;
2549         }
2550         if (clk_cpu_dvfs_node) {
2551                 ret = kstrtouint(buf, 0, &clk_cpu_dvfs_node->temp_limit_enable);
2552                 if (ret < 0)
2553                         goto error;
2554                 clk_cpu_dvfs_node->temp_limit_rate =
2555                         clk_cpu_dvfs_node->max_rate;
2556         }
2557 error:
2558         mutex_unlock(&temp_limit_mutex);
2559         return n;
2560 }
2561 static ssize_t cpu_temp_enable_show(struct kobject *kobj,
2562                                     struct kobj_attribute *attr, char *buf)
2563 {
2564         int ret = 0;
2565
2566         if (clk_cpu_b_dvfs_node)
2567                 ret += sprintf(buf + ret, "cpu_b:%d\n",
2568                 clk_cpu_b_dvfs_node->temp_limit_enable);
2569         if (clk_cpu_l_dvfs_node)
2570                 ret += sprintf(buf + ret, "cpu_l:%d\n",
2571                 clk_cpu_l_dvfs_node->temp_limit_enable);
2572         if (clk_cpu_dvfs_node)
2573                 ret += sprintf(buf + ret, "cpu:%d\n",
2574                 clk_cpu_dvfs_node->temp_limit_enable);
2575
2576         return ret;
2577 }
2578
2579 static struct dvfs_attribute dvfs_attrs[] = {
2580         /*     node_name        permission              show_func       store_func */
2581 //#ifdef CONFIG_RK_CLOCK_PROC
2582         __ATTR(dvfs_tree,       S_IRUSR | S_IRGRP | S_IWUSR,
2583                dvfs_tree_show,  dvfs_tree_store),
2584         __ATTR(cpu_temp_target, S_IRUSR | S_IRGRP | S_IWUSR,
2585                cpu_temp_target_show,    cpu_temp_target_store),
2586         __ATTR(cpu_temp_enable, S_IRUSR | S_IRGRP | S_IWUSR,
2587                cpu_temp_enable_show,    cpu_temp_enable_store),
2588 //#endif
2589 };
2590
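/*
 * Late init: create the /sys/dvfs attributes, resolve the cpu/gpu dvfs
 * nodes, start the temperature-limit work when any of them enables thermal
 * limiting, and hook the GPU power-domain, framebuffer and reboot notifiers
 * when a vdd_gpu regulator exists.
 */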
2591 static int __init dvfs_init(void)
2592 {
2593         int i, ret = 0;
2594
2595         dvfs_kobj = kobject_create_and_add("dvfs", NULL);
2596         if (!dvfs_kobj)
2597                 return -ENOMEM;
2598         for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
2599                 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
2600                 if (ret != 0) {
2601                         DVFS_ERR("create index %d error\n", i);
2602                         return ret;
2603                 }
2604         }
2605
2606         clk_cpu_b_dvfs_node = clk_get_dvfs_node("clk_core_b");
2607         if (clk_cpu_b_dvfs_node) {
2608                 clk_cpu_b_dvfs_node->temp_limit_rate =
2609                 clk_cpu_b_dvfs_node->max_rate;
2610                 if (clk_cpu_bl_dvfs_node == NULL)
2611                         clk_cpu_bl_dvfs_node = clk_cpu_b_dvfs_node;
2612         }
2613
2614         clk_cpu_l_dvfs_node = clk_get_dvfs_node("clk_core_l");
2615         if (clk_cpu_l_dvfs_node) {
2616                 clk_cpu_l_dvfs_node->temp_limit_rate =
2617                 clk_cpu_l_dvfs_node->max_rate;
2618                 if (clk_cpu_bl_dvfs_node == NULL)
2619                         clk_cpu_bl_dvfs_node = clk_cpu_l_dvfs_node;
2620         }
2621
2622         clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
2623         if (clk_cpu_dvfs_node)
2624                 clk_cpu_dvfs_node->temp_limit_rate =
2625                 clk_cpu_dvfs_node->max_rate;
2626
2627         clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
2628         if (clk_gpu_dvfs_node)
2629                 clk_gpu_dvfs_node->temp_limit_rate =
2630                 clk_gpu_dvfs_node->max_rate;
2631
2632         if ((clk_cpu_b_dvfs_node && clk_cpu_b_dvfs_node->temp_limit_enable) ||
2633             (clk_cpu_l_dvfs_node && clk_cpu_l_dvfs_node->temp_limit_enable) ||
2634             (clk_gpu_dvfs_node && clk_gpu_dvfs_node->temp_limit_enable) ||
2635             (clk_cpu_dvfs_node && clk_cpu_dvfs_node->temp_limit_enable)) {
2636                 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT |
2637                         WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
2638                 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
2639         }
2640
2641         vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
2642         if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
2643                 struct clk *clk = clk_get(NULL, "pd_gpu");
2644
2645                 if (!IS_ERR_OR_NULL(clk))
2646                         rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
2647
2648                 fb_register_client(&early_suspend_notifier);
2649                 register_reboot_notifier(&vdd_gpu_reboot_notifier);
2650         }
2651
2652         return ret;
2653 }
2654
2655 late_initcall(dvfs_init);