1 /* arch/arm/mach-rockchip/dvfs.c
2  *
3  * Copyright (C) 2012 ROCKCHIP, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
20 #include <linux/of.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
24 #include <linux/fb.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
30
31 extern int rockchip_tsadc_get_temp(int chn);
32
33 #define MHz     (1000 * 1000)
34 static LIST_HEAD(rk_dvfs_tree);
35 static DEFINE_MUTEX(rk_dvfs_mutex);
36 static struct workqueue_struct *dvfs_wq;
37 static struct dvfs_node *clk_cpu_dvfs_node;
38 static unsigned int target_temp = 80;
39 static int temp_limit_enable;
40
41 static int pd_gpu_off, early_suspend;
42 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
43 struct regulator *vdd_gpu_regulator;
44
45 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
46         unsigned long event, void *ptr)
47 {
48         int ret;
49
50         DVFS_DBG("%s: enable vdd_gpu\n", __func__);
51         mutex_lock(&switch_vdd_gpu_mutex);
52         if (!regulator_is_enabled(vdd_gpu_regulator))
53                 ret = regulator_enable(vdd_gpu_regulator);
54         mutex_unlock(&switch_vdd_gpu_mutex);
55
56         return NOTIFY_OK;
57 }
58
59 static struct notifier_block vdd_gpu_reboot_notifier = {
60         .notifier_call = vdd_gpu_reboot_notifier_event,
61 };
62
63 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
64         unsigned long event, void *ptr)
65 {
66         int ret;
67
68         switch (event) {
69         case RK_CLK_PD_PREPARE:
70                 mutex_lock(&switch_vdd_gpu_mutex);
71                 pd_gpu_off = 0;
72                 if (early_suspend) {
73                         if (!regulator_is_enabled(vdd_gpu_regulator))
74                                 ret = regulator_enable(vdd_gpu_regulator);
75                 }
76                 mutex_unlock(&switch_vdd_gpu_mutex);
77                 break;
78         case RK_CLK_PD_UNPREPARE:
79                 mutex_lock(&switch_vdd_gpu_mutex);
80                 pd_gpu_off = 1;
81                 if (early_suspend) {
82                         if (regulator_is_enabled(vdd_gpu_regulator))
83                                 ret = regulator_disable(vdd_gpu_regulator);
84                 }
85                 mutex_unlock(&switch_vdd_gpu_mutex);
86                 break;
87         default:
88                 break;
89         }
90
91         return NOTIFY_OK;
92 }
93
94 static struct notifier_block clk_pd_gpu_notifier = {
95         .notifier_call = clk_pd_gpu_notifier_call,
96 };
97
98
99 static int early_suspend_notifier_call(struct notifier_block *self,
100                                 unsigned long action, void *data)
101 {
102         struct fb_event *event = data;
103         int blank_mode = *((int *)event->data);
104         int ret;
105
106         mutex_lock(&switch_vdd_gpu_mutex);
107         if (action == FB_EARLY_EVENT_BLANK) {
108                 switch (blank_mode) {
109                 case FB_BLANK_UNBLANK:
110                         early_suspend = 0;
111                         if (pd_gpu_off) {
112                                 if (!regulator_is_enabled(vdd_gpu_regulator))
113                                         ret = regulator_enable(
114                                         vdd_gpu_regulator);
115                         }
116                         break;
117                 default:
118                         break;
119                 }
120         } else if (action == FB_EVENT_BLANK) {
121                 switch (blank_mode) {
122                 case FB_BLANK_POWERDOWN:
123                         early_suspend = 1;
124                         if (pd_gpu_off) {
125                                 if (regulator_is_enabled(vdd_gpu_regulator))
126                                         ret = regulator_disable(
127                                         vdd_gpu_regulator);
128                         }
129
130                         break;
131                 default:
132                         break;
133                 }
134         }
135         mutex_unlock(&switch_vdd_gpu_mutex);
136
137         return NOTIFY_OK;
138 }
139
140 static struct notifier_block early_suspend_notifier = {
141                 .notifier_call = early_suspend_notifier_call,
142 };
143
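/*
 * DVFS regulator operating modes as used in the dvfs nodes and DT tables.
 * These values are not the kernel's REGULATOR_MODE_* bitmask values;
 * dvfs_regu_mode_convert()/dvfs_regu_mode_deconvert() below translate
 * between the two encodings.
 */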
144 #define DVFS_REGULATOR_MODE_STANDBY     1
145 #define DVFS_REGULATOR_MODE_IDLE        2
146 #define DVFS_REGULATOR_MODE_NORMAL      3
147 #define DVFS_REGULATOR_MODE_FAST        4
148
149 static const char* dvfs_regu_mode_to_string(unsigned int mode)
150 {
151         switch (mode) {
152         case DVFS_REGULATOR_MODE_FAST:
153                 return "FAST";
154         case DVFS_REGULATOR_MODE_NORMAL:
155                 return "NORMAL";
156         case DVFS_REGULATOR_MODE_IDLE:
157                 return "IDLE";
158         case DVFS_REGULATOR_MODE_STANDBY:
159                 return "STANDBY";
160         default:
161                 return "UNKNOWN";
162         }
163 }
164
165 static int dvfs_regu_mode_convert(unsigned int mode)
166 {
167         switch (mode) {
168         case DVFS_REGULATOR_MODE_FAST:
169                 return REGULATOR_MODE_FAST;
170         case DVFS_REGULATOR_MODE_NORMAL:
171                 return REGULATOR_MODE_NORMAL;
172         case DVFS_REGULATOR_MODE_IDLE:
173                 return REGULATOR_MODE_IDLE;
174         case DVFS_REGULATOR_MODE_STANDBY:
175                 return REGULATOR_MODE_STANDBY;
176         default:
177                 return -EINVAL;
178         }
179 }
180
181 static int dvfs_regu_mode_deconvert(unsigned int mode)
182 {
183         switch (mode) {
184         case REGULATOR_MODE_FAST:
185                 return DVFS_REGULATOR_MODE_FAST;
186         case REGULATOR_MODE_NORMAL:
187                 return DVFS_REGULATOR_MODE_NORMAL;
188         case REGULATOR_MODE_IDLE:
189                 return DVFS_REGULATOR_MODE_IDLE;
190         case REGULATOR_MODE_STANDBY:
191                 return DVFS_REGULATOR_MODE_STANDBY;
192         default:
193                 return -EINVAL;
194         }
195 }
196
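/*
 * Parse the optional "regu-mode-table" DT property into a
 * cpufreq_frequency_table.  The property is a flat list of
 * <freq_kHz mode> pairs, ordered from high to low frequency and
 * terminated by a 0 kHz entry, e.g. (illustrative values only):
 *
 *     regu-mode-table = <
 *             816000 4      (FAST at and above 816 MHz)
 *                  0 3      (NORMAL otherwise)
 *     >;
 *
 * Frequencies are stored in the table in Hz (kHz * 1000) and the mode
 * goes into the .index field.
 */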
197 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
198 {
199         struct cpufreq_frequency_table *regu_mode_table = NULL;
200         const struct property *prop;
201         const __be32 *val;
202         int nr, i;
203
204         prop = of_find_property(dev_node, "regu-mode-table", NULL);
205         if (!prop)
206                 return NULL;
207         if (!prop->value)
208                 return NULL;
209
210         nr = prop->length / sizeof(u32);
211         if (nr % 2) {
212                 pr_err("%s: Invalid regu-mode-table: odd number of cells\n", __func__);
213                 return NULL;
214         }
215
216         regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
217                              (nr/2+1), GFP_KERNEL);
218         if (!regu_mode_table) {
219                 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
220                 return ERR_PTR(-ENOMEM);
221         }
222
223         val = prop->value;
224
225         for (i=0; i<nr/2; i++){
226                 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
227                 regu_mode_table[i].index = be32_to_cpup(val++);
228         }
229
230         if (regu_mode_table[i-1].frequency != 0) {
231                 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
232                 kfree(regu_mode_table);
233                 return NULL;
234         }
235
236         regu_mode_table[i].index = 0;
237         regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
238
239         return regu_mode_table;
240 }
241
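/*
 * Check every mode in the node's regu_mode_table against the modes the
 * regulator actually supports: tables with an unsupported mode are
 * dropped, and individual entries are rounded to the valid mode reported
 * back by regulator_is_supported_mode().
 */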
242 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
243 {
244         int i, ret;
245         int mode, convert_mode, valid_mode;
246
247         if (!clk_dvfs_node)
248                 return -EINVAL;
249
250         if (!clk_dvfs_node->regu_mode_table)
251                 return -EINVAL;
252
253         if (!clk_dvfs_node->vd)
254                 return -EINVAL;
255
256         if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
257                 return -EINVAL;
258
259         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
260                 mode = clk_dvfs_node->regu_mode_table[i].index;
261                 convert_mode = dvfs_regu_mode_convert(mode);
262
263                 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
264                                                 &convert_mode);
265                 if (ret) {
266                         DVFS_ERR("%s: mode=%d is not supported by the regulator\n", __func__,
267                                 mode);
268                         kfree(clk_dvfs_node->regu_mode_table);
269                         clk_dvfs_node->regu_mode_table = NULL;
270                         return ret;
271                 }
272
273                 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
274                 if (valid_mode != mode) {
275                         DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
276                                 __func__, mode, valid_mode);
277                         clk_dvfs_node->regu_mode_table[i].index = valid_mode;
278                 }
279
280         }
281
282         return 0;
283 }
284
285 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
286         unsigned long rate, unsigned int *mode)
287 {
288         int i;
289
290
291         if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
292                 return -EINVAL;
293
294         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
295                 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
296                         *mode = clk_dvfs_node->regu_mode_table[i].index;
297                         return 0;
298                 }
299         }
300
301         return -EINVAL;
302 }
303
304 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
305 {
306         unsigned int mode_max = 0;
307
308
309         if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
310                 return clk_dvfs_node->regu_mode;
311         }
312
313         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
314                 if (clk_dvfs_node->regu_mode_en)
315                         mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
316         }
317
318         return mode_max;
319 }
320
321 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
322 {
323         struct pd_node *pd;
324
325         if (!clk_dvfs_node)
326                 return;
327
328         pd = clk_dvfs_node->pd;
329         if (!pd)
330                 return;
331
332         pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
333 }
334
335 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
336 {
337         unsigned int mode_max_vd = 0;
338         struct pd_node *pd;
339
340         if (!vd)
341                 return -EINVAL;
342
343         list_for_each_entry(pd, &vd->pd_list, node) {
344                 mode_max_vd = max(mode_max_vd, pd->regu_mode);
345         }
346
347         return mode_max_vd;
348 }
349
350 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
351 {
352         if (!clk_dvfs_node)
353                 return -EINVAL;
354
355         dvfs_update_clk_pds_mode(clk_dvfs_node);
356
357         return  dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
358 }
359
360 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
361 {
362         int convert_mode;
363         int ret = 0;
364
365
366         if (IS_ERR_OR_NULL(vd)) {
367                 DVFS_ERR("%s: vd_node error\n", __func__);
368                 return -EINVAL;
369         }
370
371         DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
372
373         convert_mode = dvfs_regu_mode_convert(mode);
374         if (convert_mode < 0) {
375                 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
376                 return convert_mode;
377         }
378
379         if (!IS_ERR_OR_NULL(vd->regulator)) {
380                 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
381                 if (ret < 0) {
382                         DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
383                                 vd->regulator_name, mode, vd->regu_mode);
384                         return -EAGAIN;
385                 }
386         } else {
387                 DVFS_ERR("%s: invalid regulator\n", __func__);
388                 return -EINVAL;
389         }
390
391         vd->regu_mode = mode;
392
393         return 0;
394 }
395
396 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
397 {
398         int ret;
399         int mode;
400
401
402         if (!clk_dvfs_node)
403                 return -EINVAL;
404
405         if (!clk_dvfs_node->regu_mode_en)
406                 return 0;
407
408         ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
409         if (ret) {
410                 DVFS_ERR("%s: clk(%s) rate %luhz: failed to get regu mode\n",
411                         __func__, clk_dvfs_node->name, rate);
412                 return ret;
413         }
414         clk_dvfs_node->regu_mode = mode;
415
416         mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
417         if (mode < 0)
418                 return mode;
419
420         ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
421
422         return ret;
423 }
424
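/*
 * Wait for the supply to settle after a voltage increase.  The delay comes
 * from regulator_set_voltage_time() when the regulator reports it;
 * otherwise the fallback is (new_volt - old_volt) >> 9, i.e. roughly
 * 2 us per mV (a 100 mV step waits about 195 us), since voltages here are
 * in uV.
 */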
425 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
426 {
427         int u_time;
428         
429         if(new_volt <= old_volt)
430                 return;
431         if(vd->volt_time_flag > 0)      
432                 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
433         else
434                 u_time = -1;            
435         if(u_time < 0) {/* regulator cannot report the ramp time, use the default */
436                 DVFS_DBG("%s: vd %s does not support reporting the ramp delay time, using default\n",
437                                 __func__, vd->name);
438                 u_time = ((new_volt) - (old_volt)) >> 9;
439         }
440         
441         DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n", 
442                 __func__, vd->name, old_volt, new_volt, u_time);
443         
444         if (u_time >= 1000) {
445                 mdelay(u_time / 1000);
446                 udelay(u_time % 1000);
447                 DVFS_WARNING("%s: regulator set voltage delay is larger than 1ms, old is %d, new is %d\n",
448                         __func__, old_volt, new_volt);
449         } else if (u_time) {
450                 udelay(u_time);
451         }                       
452 }
453
454 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
455 {
456         int ret = 0, read_back = 0;
457         
458         ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
459         if (ret < 0) {
460                 DVFS_ERR("%s: now read back to check voltage\n", __func__);
461
462                 /* read back to judge if it is already effect */
463                 mdelay(2);
464                 read_back = dvfs_regulator_get_voltage(regulator);
465                 if (read_back == max_uV) {
466                         DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
467                         ret = 0;
468                 } else {
469                         DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
470                 }
471         }
472         
473         return ret;
474 }
475
476 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
477 {
478         int ret = 0;
479         
480         DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
481         
482         if (IS_ERR_OR_NULL(vd_clk)) {
483                 DVFS_ERR("%s: vd_node error\n", __func__);
484                 return -EINVAL;
485         }
486
487         if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
488                 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
489                 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
490                 if (ret < 0) {
491                         vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
492                         DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
493                                         __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
494                         return -EAGAIN;
495                 }
496
497         } else {
498                 DVFS_ERR("%s: invalid regulator\n", __func__);
499                 return -EINVAL;
500         }
501
502         vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
503         vd_clk->cur_volt = volt_new;
504
505         return 0;
506
507 }
508
509 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
510 {
511         int flag_set_volt_correct = 0;
512         if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
513                 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
514         else {
515                 DVFS_ERR("%s: invalid regulator\n", __func__);
516                 return -EINVAL;
517         }
518         if (flag_set_volt_correct <= 0) {
519                 DVFS_ERR("%s (vd:%s): tried to reload volt but read back an error (%d), stop scaling\n",
520                                 __func__, dvfs_vd->name, flag_set_volt_correct);
521                 return -EAGAIN;
522         }
523         dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
524         DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
525                         __func__, dvfs_vd->name, flag_set_volt_correct);
526
527         /* Reset vd's voltage */
528         dvfs_vd->cur_volt = flag_set_volt_correct;
529
530         return dvfs_vd->cur_volt;
531 }
532
533
534 /* refresh the vd's regulator info when a clock enables dvfs */
535 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
536 {
537         vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
538         if(vd->cur_volt <= 0)
539                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
540         else
541                 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
542 }
543
544 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
545 {
546         unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
547         int n = 0, sel_volt = 0;
548         
549         if(selector > VD_VOL_LIST_CNT)
550                 selector = VD_VOL_LIST_CNT;
551
552         for (i = 0; i < selector; i++) {
553                 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
554                 if(sel_volt <= 0){      
555                         //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
556                         //      __func__, vd->name, i, sel_volt);
557                         continue;
558                 }
559                 vd->volt_list[n++] = sel_volt;  
560                 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n", 
561                         __func__, vd->name, i, n, sel_volt);
562         }
563         
564         vd->n_voltages = n;
565 }
566
567 /* round up: return the smallest supported voltage >= volt */
568 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
569 {
570         int sel_volt;
571         int i;
572         
573         for (i = 0; i < vd->n_voltages; i++) {
574                 sel_volt = vd->volt_list[i];
575                 if(sel_volt <= 0){      
576                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
577                                 __func__, i);
578                         continue;
579                 }
580                 if(sel_volt >= volt)
581                         return sel_volt;        
582         }
583         return -EINVAL;
584 }
585
586 /* round down: return the largest supported voltage <= volt */
587 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
588 {
589         int sel_volt;
590         int i;
591         
592         for (i = 0; i < vd->n_voltages; i++) {
593                 sel_volt = vd->volt_list[i];
594                 if(sel_volt <= 0){      
595                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
596                                 __func__, i);
597                         continue;
598                 }
599                 if(sel_volt > volt){
600                         if(i > 0)
601                                 return vd->volt_list[i-1];
602                         else
603                                 return -EINVAL;
604                 }       
605         }
606         
607         return -EINVAL;
608 }
609
610 /* round volt against the regulator's voltage list according to flags */
611 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
612 {
613         if(!vd->n_voltages)
614                 return -EINVAL;
615         if(flags == VD_LIST_RELATION_L)
616                 return vd_regulator_round_volt_min(vd, volt);
617         else
618                 return vd_regulator_round_volt_max(vd, volt);   
619 }
620
621 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
622 {
623         int i, test_volt;
624
625         if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd || 
626                 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
627                 return;
628
629         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
630
631                 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
632                 if(test_volt <= 0)
633                 {       
634                         DVFS_WARNING("%s: clk(%s) could not round volt(%d) against the regulator list\n",
635                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
636                         break;
637                 }
638                 DVFS_DBG("clk %s:round_volt %d to %d\n",
639                         clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
640                 
641                 clk_dvfs_node->dvfs_table[i].index=test_volt;           
642         }
643 }
644
645 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
646 {
647         if(vd->volt_time_flag <= 0){/* check whether the regulator can report the voltage ramp time */
648                 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
649                 if(vd->volt_time_flag < 0){
650                         DVFS_DBG("%s,vd %s volt_time is not supported\n",
651                                 __func__, vd->name);
652                 }
653                 else{
654                         DVFS_DBG("%s,vd %s volt_time is supported, ramping up 200mV needs a %d us delay\n",
655                                 __func__, vd->name, vd->volt_time_flag);
656                 }       
657         }
658 }
659 #if 0
660 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
661 {
662         //REGULATOR_MODE_FAST
663         if(vd->mode_flag <= 0){/* check whether the regulator supports mode get/set */
664                 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
665                 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
666                         || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
667                         
668                         if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
669                                 vd->mode_flag = 0;// check again
670                         }
671                 }
672                 if(vd->mode_flag > 0){
673                         DVFS_DBG("%s,vd %s mode(now is %d) support\n",
674                                 __func__, vd->name, vd->mode_flag);
675                 }
676                 else{
677                         DVFS_DBG("%s,vd %s mode is not support now check\n",
678                                 __func__, vd->name);
679                 }
680         }
681 }
682 #endif
683
684 struct regulator *dvfs_get_regulator(char *regulator_name) 
685 {
686         struct vd_node *vd;
687
688         mutex_lock(&rk_dvfs_mutex);
689         list_for_each_entry(vd, &rk_dvfs_tree, node) {
690                 if (strcmp(regulator_name, vd->regulator_name) == 0) {
691                         mutex_unlock(&rk_dvfs_mutex);
692                         return vd->regulator;
693                 }
694         }
695         mutex_unlock(&rk_dvfs_mutex);
696         return NULL;
697 }
698
699 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
700 {
701         struct cpufreq_frequency_table *table;
702         int i = 0;
703
704         if (!clk_dvfs_node)
705                 return -EINVAL;
706
707         clk_dvfs_node->min_rate = 0;
708         clk_dvfs_node->max_rate = 0;
709
710         table = clk_dvfs_node->dvfs_table;
711         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
712                 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
713                 if (i == 0)
714                         clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
715         }
716
717         DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
718                         __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
719
720         return 0;
721 }
722
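/*
 * Round every table frequency to what the clock can actually produce.
 * For ddr-style tables the low three decimal digits of the kHz value are
 * flag bits, not frequency: an entry of e.g. 300002 means 300000 kHz with
 * flag 2 (illustrative value).  The real rate is rounded by the clock
 * framework, rounded up to a whole MHz, and the flags are re-attached
 * afterwards.
 */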
723 static void dvfs_table_round_clk_rate(struct dvfs_node  *clk_dvfs_node)
724 {
725         int i, rate, temp_rate, flags;
726         
727         if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
728                 return;
729
730         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
731                 /* ddr table entries: frequency = real rate (kHz) + flags in the low three digits */
732                 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
733                 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
734                 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
735                 if(temp_rate <= 0){     
736                         DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
737                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
738                         continue;
739                 }
740                 
741                 /* Round the rate up to a whole MHz */
742                 if (temp_rate % MHz != 0)
743                         temp_rate = (temp_rate / MHz + 1) * MHz;
744
745                 temp_rate = (temp_rate / 1000) + flags;
746                 
747                 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
748                         clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
749                 
750                 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;             
751         }
752 }
753
754 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
755                 struct cpufreq_frequency_table *clk_fv)
756 {
757         int i = 0;
758         
759         if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
760                 /* nothing to look up */
761                 return -EINVAL;
762         }
763         clk_fv->frequency = rate_khz;
764         clk_fv->index = 0;
765
766         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
767                 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
768                         clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
769                         clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
770                          //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
771                          //clk_fv->frequency, clk_fv->index);
772                         return 0;
773                 }
774         }
775         clk_fv->frequency = 0;
776         clk_fv->index = 0;
777         //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
778         return -EINVAL;
779 }
780
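/*
 * Voltage aggregation: a power domain needs the highest voltage requested
 * by any of its enabled clocks, and a voltage domain needs the highest
 * voltage of any of its power domains.  The helpers below recompute this
 * chain whenever one clock's requested voltage changes.
 */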
781 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
782 {
783         int volt_max = 0;
784
785         if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
786                 return clk_dvfs_node->set_volt;
787         }
788
789         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
790                 if (clk_dvfs_node->enable_count)
791                         volt_max = max(volt_max, clk_dvfs_node->set_volt);
792         }
793         return volt_max;
794 }
795
796 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
797 {
798         struct pd_node *pd;
799         
800         if (!clk_dvfs_node)
801                 return;
802         
803         pd = clk_dvfs_node->pd;
804         if (!pd)
805                 return;
806         
807         pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
808 }
809
810 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
811 {
812         int volt_max_vd = 0;
813         struct pd_node *pd;
814
815         if (!vd)
816                 return -EINVAL;
817         
818         list_for_each_entry(pd, &vd->pd_list, node) {
819                 volt_max_vd = max(volt_max_vd, pd->cur_volt);
820         }
821
822         return volt_max_vd;
823 }
824
825 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
826 {
827         if (!clk_dvfs_node)
828                 return -EINVAL;
829
830         dvfs_update_clk_pds_volt(clk_dvfs_node);
831         return  dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
832 }
833
834 #if 0
835 static void dvfs_temp_limit_work_func(struct work_struct *work)
836 {
837         unsigned long delay = HZ / 10; // 100ms
838         struct vd_node *vd;
839         struct pd_node *pd;
840         struct dvfs_node *clk_dvfs_node;
841
842         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
843
844         mutex_lock(&rk_dvfs_mutex);
845         list_for_each_entry(vd, &rk_dvfs_tree, node) {
846                 mutex_lock(&vd->mutex);
847                 list_for_each_entry(pd, &vd->pd_list, node) {
848                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
849                                 if (clk_dvfs_node->temp_limit_table) {
850                                         clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
851                                         clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
852                                 }
853                         }
854                 }
855                 mutex_unlock(&vd->mutex);
856         }
857         mutex_unlock(&rk_dvfs_mutex);
858 }
859 #endif
860
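/*
 * Fallback thermal limiting for rk3126/rk3128 when no valid tsadc reading
 * is available: estimate how many CPUs are busy from the idle-time delta
 * over the last period, pick the matching virt_temp_limit_table, and clamp
 * the CPU rate to the entry selected by target_temp.  The estimate is
 * skipped while performance mode is active and for one period after it
 * ends.
 */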
861 static void dvfs_virt_temp_limit_work_func(void)
862 {
863         const struct cpufreq_frequency_table *limits_table = NULL;
864         unsigned int new_temp_limit_rate = -1;
865         unsigned int nr_cpus = num_online_cpus();
866         static bool in_perf;
867         int i;
868
869         if (!soc_is_rk3126() && !soc_is_rk3128())
870                 return;
871
872         if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
873                 in_perf = true;
874         } else if (in_perf) {
875                 in_perf = false;
876         } else {
877                 static u64 last_time_in_idle;
878                 static u64 last_time_in_idle_timestamp;
879                 u64 time_in_idle = 0, now;
880                 u32 delta_idle;
881                 u32 delta_time;
882                 unsigned cpu, busy_cpus;
883
884                 for_each_online_cpu(cpu) {
885                         time_in_idle += get_cpu_idle_time_us(cpu, &now);
886                 }
887                 delta_time = now - last_time_in_idle_timestamp;
888                 delta_idle = time_in_idle - last_time_in_idle;
889                 last_time_in_idle = time_in_idle;
890                 last_time_in_idle_timestamp = now;
891                 delta_idle += delta_time >> 4; /* +6.25% */
892                 if (delta_idle > (nr_cpus - 1) * delta_time &&
893                     delta_idle < (nr_cpus + 1) * delta_time)
894                         busy_cpus = 1;
895                 else if (delta_idle > (nr_cpus - 2) * delta_time)
896                         busy_cpus = 2;
897                 else if (delta_idle > (nr_cpus - 3) * delta_time)
898                         busy_cpus = 3;
899                 else
900                         busy_cpus = 4;
901
902                 limits_table = clk_cpu_dvfs_node->virt_temp_limit_table[busy_cpus-1];
903                 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
904                          delta_time, delta_idle, nr_cpus, busy_cpus);
905         }
906
907         if (limits_table) {
908                 new_temp_limit_rate = limits_table[0].frequency;
909                 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
910                         if (target_temp >= limits_table[i].index)
911                                 new_temp_limit_rate = limits_table[i].frequency;
912                 }
913         }
914
915         if (clk_cpu_dvfs_node->temp_limit_rate != new_temp_limit_rate) {
916                 clk_cpu_dvfs_node->temp_limit_rate = new_temp_limit_rate;
917                 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
918                 DVFS_DBG("temp_limit_rate:%d\n", (int)clk_cpu_dvfs_node->temp_limit_rate);
919         }
920 }
921
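/*
 * Periodic (100 ms) thermal limiter for the CPU clock.  Readings are
 * debounced by ignoring changes of 1 degree or less.  Under the
 * PERFORMANCE policy the limit comes straight from per_temp_limit_table;
 * under the NORMAL policy the limit rate is stepped down while the
 * temperature is above target_temp and still rising, and stepped back up
 * (capped at max_rate) once it drops below target_temp, with the step
 * size chosen from nor_temp_limit_table by the temperature delta.
 */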
922 static void dvfs_temp_limit_work_func(struct work_struct *work)
923 {
924         int temp=0, delta_temp=0;
925         unsigned long delay = HZ/10;
926         unsigned long arm_rate_step=0;
927         static int old_temp=0;
928         int i;
929
930         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
931
932         temp = rockchip_tsadc_get_temp(1);
933
934         if (temp == INVALID_TEMP)
935                 return dvfs_virt_temp_limit_work_func();
936
937         //debounce
938         delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
939         if (delta_temp <= 1)
940                 return;
941
942         if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
943                 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
944                         return;
945                 }
946
947                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
948                 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
949                         if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
950                                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
951                         }
952                 }
953                 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
954         } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
955                 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
956                         return;
957                 }
958
959                 if (temp > target_temp) {
960                         if (temp > old_temp) {
961                                 delta_temp = temp - target_temp;
962                                 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
963                                         if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
964                                                 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
965                                         }
966                                 }
967                                 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
968                                         clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
969                                         dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
970                                 }
971                         }
972                 } else {
973                         if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
974                                 delta_temp = target_temp - temp;
975                                 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
976                                         if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
977                                                 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
978                                         }
979                                 }
980
981                                 if (arm_rate_step) {
982                                         clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
983                                         if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
984                                                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
985                                         }
986                                         dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
987                                 }
988                         }
989                 }
990         }
991
992         DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
993
994         old_temp = temp;
995 }
996 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
997
998
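/*
 * Clamp a dvfs clock to an external [min_rate, max_rate] window.  The
 * window is validated against the rates in the dvfs table, the new limit
 * is applied immediately by re-targeting the last requested rate, and
 * dvfs_clk_disable_limit() restores the table's own range.
 */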
999 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1000 {
1001         u32 rate = 0, ret = 0;
1002
1003         if (!clk_dvfs_node || (min_rate > max_rate))
1004                 return -EINVAL;
1005         
1006         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1007                 mutex_lock(&clk_dvfs_node->vd->mutex);
1008                 
1009                 /* To reset clk_dvfs_node->min_rate/max_rate */
1010                 dvfs_get_rate_range(clk_dvfs_node);
1011                 clk_dvfs_node->freq_limit_en = 1;
1012
1013                 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1014                         clk_dvfs_node->min_rate = min_rate;
1015                 }
1016                 
1017                 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1018                         clk_dvfs_node->max_rate = max_rate;
1019                 }
1020
1021                 if (clk_dvfs_node->last_set_rate == 0)
1022                         rate = __clk_get_rate(clk_dvfs_node->clk);
1023                 else
1024                         rate = clk_dvfs_node->last_set_rate;
1025                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1026
1027                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1028
1029         }
1030
1031         DVFS_DBG("%s:clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
1032                         __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, 
1033                         clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1034
1035         return 0;
1036 }
1037 EXPORT_SYMBOL(dvfs_clk_enable_limit);
1038
1039 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1040 {
1041         u32 ret = 0;
1042
1043         if (!clk_dvfs_node)
1044                 return -EINVAL;
1045         
1046         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1047                 mutex_lock(&clk_dvfs_node->vd->mutex);
1048                 
1049                 /* To reset clk_dvfs_node->min_rate/max_rate */
1050                 dvfs_get_rate_range(clk_dvfs_node);
1051                 clk_dvfs_node->freq_limit_en = 0;
1052                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1053
1054                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1055         }
1056
1057         DVFS_DBG("%s: clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
1058                         __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1059         return 0;
1060 }
1061 EXPORT_SYMBOL(dvfs_clk_disable_limit);
1062
1063 void dvfs_disable_temp_limit(void) {
1064         temp_limit_enable = 0;
1065         cancel_delayed_work_sync(&dvfs_temp_limit_work);
1066 }
1067
1068 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate) 
1069 {
1070         int freq_limit_en;
1071
1072         if (!clk_dvfs_node)
1073                 return -EINVAL;
1074
1075         mutex_lock(&clk_dvfs_node->vd->mutex);
1076
1077         *min_rate = clk_dvfs_node->min_rate;
1078         *max_rate = clk_dvfs_node->max_rate;
1079         freq_limit_en = clk_dvfs_node->freq_limit_en;
1080
1081         mutex_unlock(&clk_dvfs_node->vd->mutex);
1082
1083         return freq_limit_en;
1084 }
1085 EXPORT_SYMBOL(dvfs_clk_get_limit);
1086
1087 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1088 {
1089         if (!clk_dvfs_node)
1090                 return -EINVAL;
1091                         
1092         mutex_lock(&clk_dvfs_node->vd->mutex);
1093         clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1094         mutex_unlock(&clk_dvfs_node->vd->mutex);
1095
1096         return 0;
1097 }
1098 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
1099
1100 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node) 
1101 {
1102         struct cpufreq_frequency_table *table;
1103
1104         if (!clk_dvfs_node)
1105                 return NULL;
1106
1107         mutex_lock(&clk_dvfs_node->vd->mutex);
1108         table = clk_dvfs_node->dvfs_table;
1109         mutex_unlock(&clk_dvfs_node->vd->mutex);
1110         
1111         return table;
1112 }
1113 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
1114
1115 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1116 {
1117         if (!clk_dvfs_node)
1118                 return -EINVAL;
1119
1120         if (IS_ERR_OR_NULL(table)){
1121                 DVFS_ERR("%s:invalid table!\n", __func__);
1122                 return -EINVAL;
1123         }
1124         
1125         mutex_lock(&clk_dvfs_node->vd->mutex);
1126         clk_dvfs_node->dvfs_table = table;
1127         dvfs_get_rate_range(clk_dvfs_node);
1128         dvfs_table_round_clk_rate(clk_dvfs_node);
1129         dvfs_table_round_volt(clk_dvfs_node);
1130         mutex_unlock(&clk_dvfs_node->vd->mutex);
1131
1132         return 0;
1133 }
1134 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
1135
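/*
 * Enable dvfs management for a clock (refcounted via enable_count).  On
 * the first enable the voltage-domain regulator is looked up, its voltage
 * list and ramp-time support are cached, the freq/volt table is rounded
 * against the clock and the regulator, and the supply is raised if the
 * clock's current rate needs more voltage than the domain is running at.
 */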
1136 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1137 {
1138         struct cpufreq_frequency_table clk_fv;
1139         int volt_new;
1140         unsigned int mode;
1141         int ret;
1142
1143
1144         if (!clk_dvfs_node)
1145                 return -EINVAL;
1146         
1147         DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n", 
1148                 __func__, __clk_get_name(clk_dvfs_node->clk));
1149
1150         if (!clk_dvfs_node->vd) {
1151                 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n", 
1152                         __func__, clk_dvfs_node->name);
1153                 return -EINVAL;
1154         }
1155         mutex_lock(&clk_dvfs_node->vd->mutex);
1156         if (clk_dvfs_node->enable_count == 0) {
1157                 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1158                         if (clk_dvfs_node->vd->regulator_name)
1159                                 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1160                         if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1161                                 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1162                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1163                                 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1164                                 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1165                                 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1166                         } else {
1167                                 clk_dvfs_node->enable_count = 0;
1168                                 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n", 
1169                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1170                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1171                                 return -ENXIO;
1172                         }
1173                 } else {
1174                         clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1175                 }
1176                 
1177                 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1178                         __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
1179
1180                 dvfs_table_round_clk_rate(clk_dvfs_node);
1181                 dvfs_get_rate_range(clk_dvfs_node);
1182                 clk_dvfs_node->freq_limit_en = 1;
1183                 dvfs_table_round_volt(clk_dvfs_node);
1184                 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1185                 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1186                 
1187                 DVFS_DBG("%s: %s get freq %u!\n", 
1188                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1189
1190                 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1191                         if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1192                                 DVFS_ERR("%s: table empty\n", __func__);
1193                                 clk_dvfs_node->enable_count = 0;
1194                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1195                                 return -EINVAL;
1196                         } else {
1197                                 DVFS_WARNING("%s: all values in clk(%s) freq table are smaller than the default (%d), use default, just enable dvfs\n", 
1198                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1199                                 clk_dvfs_node->enable_count++;
1200                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1201                                 return 0;
1202                         }
1203                 }
1204                 clk_dvfs_node->enable_count++;
1205                 clk_dvfs_node->set_volt = clk_fv.index;
1206                 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1207                 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1208                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1209 #if 0
1210                 if (clk_dvfs_node->dvfs_nb) {
1211                         // must unregister when clk disable
1212                         clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
1213                 }
1214 #endif
1215                 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1216                         ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1217                         dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1218                         if (ret < 0) {
1219                                 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1220                                 clk_dvfs_node->enable_count = 0;
1221                                 DVFS_ERR("dvfs enable clk %s, set volt error\n", clk_dvfs_node->name);
1222                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1223                                 return -EAGAIN;
1224                         }
1225                         clk_dvfs_node->vd->cur_volt = volt_new;
1226                         clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1227                 }
1228
1229         } else {
1230                 DVFS_DBG("%s: dvfs already enabled, enable_count = %d!\n",
1231                         __func__, clk_dvfs_node->enable_count);
1232                 clk_dvfs_node->enable_count++;
1233         }
1234
1235         if (clk_dvfs_node->regu_mode_en) {
1236                 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1237                 if (ret) {
1238                         DVFS_ERR("%s: clk(%s) regu_mode_table is invalid, set regu_mode_en=0!\n",
1239                                         __func__, clk_dvfs_node->name);
1240                         clk_dvfs_node->regu_mode_en = 0;
1241                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1242                         return ret;
1243                 }
1244
1245                 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1246                 if (ret < 0) {
1247                         DVFS_ERR("%s: clk(%s) rate %dKHz: failed to get regu_mode\n",
1248                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1249                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1250                         return ret;
1251                 } else
1252                         clk_dvfs_node->regu_mode = mode;
1253
1254                 dvfs_update_clk_pds_mode(clk_dvfs_node);
1255         }
1256
1257         mutex_unlock(&clk_dvfs_node->vd->mutex);
1258         
1259         return 0;
1260 }
1261 EXPORT_SYMBOL(clk_enable_dvfs);
1262
1263 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1264 {
1265         int volt_new;
1266
1267         if (!clk_dvfs_node)
1268                 return -EINVAL;
1269
1270         DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n", 
1271                 __func__, __clk_get_name(clk_dvfs_node->clk));
1272
1273         mutex_lock(&clk_dvfs_node->vd->mutex);
1274         if (!clk_dvfs_node->enable_count) {
1275                 DVFS_WARNING("%s:clk(%s) is already closed!\n", 
1276                         __func__, __clk_get_name(clk_dvfs_node->clk));
1277                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1278                 return 0;
1279         } else {
1280                 clk_dvfs_node->enable_count--;
1281                 if (0 == clk_dvfs_node->enable_count) {
1282                         DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1283                                 __func__, __clk_get_name(clk_dvfs_node->clk));
1284                         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1285                         dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1286
1287 #if 0
1288                         clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1289                         DVFS_DBG("clk unregister nb!\n");
1290 #endif
1291                 }
1292         }
1293         mutex_unlock(&clk_dvfs_node->vd->mutex);
1294         return 0;
1295 }
1296 EXPORT_SYMBOL(clk_disable_dvfs);
1297
1298 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1299 {
1300         unsigned long limit_rate;
1301
1302         limit_rate = rate;
1303         if (clk_dvfs_node->freq_limit_en) {
1304                 //dvfs table limit
1305                 if (rate < clk_dvfs_node->min_rate) {
1306                         limit_rate = clk_dvfs_node->min_rate;
1307                 } else if (rate > clk_dvfs_node->max_rate) {
1308                         limit_rate = clk_dvfs_node->max_rate;
1309                 }
1310                 if (temp_limit_enable) {
1311                         if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1312                                 limit_rate = clk_dvfs_node->temp_limit_rate;
1313                         }
1314                 }
1315         }
1316
1317         DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
1318
1319         return limit_rate;
1320 }
1321
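/*
 * Core DVFS sequencing.  The requested rate is clamped by the configured
 * limits, then: for a rate increase the regulator mode and voltage are
 * raised first and the clock is switched afterwards; for a rate decrease
 * the clock is switched first and the voltage/mode are lowered afterwards.
 * A failed rate change rolls the requested voltage back.
 */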
1322 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1323 {
1324         struct cpufreq_frequency_table clk_fv;
1325         unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1326         struct clk *clk = clk_dvfs_node->clk;
1327         int ret;
1328
1329         if (!clk)
1330                 return -EINVAL;
1331
1332         if (!clk_dvfs_node->enable_count){
1333                 DVFS_WARNING("%s:dvfs(%s) is disabled\n",
1334                         __func__, clk_dvfs_node->name);
1335                 return 0;
1336         }
1337         
1338         if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1339                 /* the last voltage set attempt failed */
1340                 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1341                 if (ret < 0) {
1342                         return -EAGAIN;
1343                 }
1344         }
1345
1346         rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1347         new_rate = __clk_round_rate(clk, rate);
1348         old_rate = __clk_get_rate(clk);
1349         if (new_rate == old_rate)
1350                 return 0;
1351
1352         DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1353                 __func__, clk_dvfs_node->name, new_rate, old_rate);
1354
1355         /* find the clk corresponding voltage */
1356         ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1357         if (ret) {
1358                 DVFS_ERR("%s:dvfs clk(%s) rate %luhz is not supported\n",
1359                         __func__, clk_dvfs_node->name, new_rate);
1360                 return ret;
1361         }
1362         clk_volt_store = clk_dvfs_node->set_volt;
1363         clk_dvfs_node->set_volt = clk_fv.index;
1364         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1365         DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1366                 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1367
1368
1369         /* if up the rate */
1370         if (new_rate > old_rate) {
1371                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1372                 if (ret)
1373                         DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1374                                 __func__, clk_dvfs_node->name, new_rate);
1375
1376                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1377                 if (ret)
1378                         goto fail_roll_back;
1379         }
1380
1381         /* scale rate */
1382         if (clk_dvfs_node->clk_dvfs_target) {
1383                 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1384         } else {
1385                 ret = clk_set_rate(clk, rate);
1386         }
1387
1388         if (ret) {
1389                 DVFS_ERR("%s:clk(%s) set rate err\n", 
1390                         __func__, __clk_get_name(clk));
1391                 goto fail_roll_back;
1392         }
1393         clk_dvfs_node->set_freq = new_rate / 1000;
1394
1395         DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n", 
1396                 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1397
1398         /* if down the rate */
1399         if (new_rate < old_rate) {
1400                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1401                 if (ret)
1402                         goto out;
1403
1404                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1405                 if (ret)
1406                         DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1407                         __func__, clk_dvfs_node->name, new_rate);
1408         }
1409
1410         return 0;
1411 fail_roll_back:
1412         clk_dvfs_node->set_volt = clk_volt_store;
1413 out:
1414         return ret;
1415 }
1416
1417 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1418 {
1419         return __clk_round_rate(clk_dvfs_node->clk, rate);
1420 }
1421 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1422
1423 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1424 {
1425         return __clk_get_rate(clk_dvfs_node->clk);
1426 }
1427 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1428
1429 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1430 {
1431         unsigned long last_set_rate;
1432
1433         mutex_lock(&clk_dvfs_node->vd->mutex);
1434         last_set_rate = clk_dvfs_node->last_set_rate;
1435         mutex_unlock(&clk_dvfs_node->vd->mutex);
1436
1437         return last_set_rate;
1438 }
1439 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1440
1441
1442 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1443 {
1444         return clk_enable(clk_dvfs_node->clk);
1445 }
1446 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1447
1448 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1449 {
1450         clk_disable(clk_dvfs_node->clk);
1451 }
1452 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1453
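/* Walk the vd -> pd -> clk tree and return the dvfs node named clk_name, or NULL if absent. */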
1454 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1455 {
1456         struct vd_node *vd;
1457         struct pd_node *pd;
1458         struct dvfs_node *clk_dvfs_node;
1459
1460         mutex_lock(&rk_dvfs_mutex);
1461         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1462                 mutex_lock(&vd->mutex);
1463                 list_for_each_entry(pd, &vd->pd_list, node) {
1464                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1465                                 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
1466                                         mutex_unlock(&vd->mutex);
1467                                         mutex_unlock(&rk_dvfs_mutex);
1468                                         return clk_dvfs_node;
1469                                 }
1470                         }
1471                 }
1472                 mutex_unlock(&vd->mutex);
1473         }
1474         mutex_unlock(&rk_dvfs_mutex);
1475         
1476         return NULL;    
1477 }
1478 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
1479
1480 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1481 {
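        /* dvfs nodes are created once at init and are not reference counted, so there is nothing to release. */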
1482         return;
1483 }
1484 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1485
1486 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1487 {
1488         return clk_prepare_enable(clk_dvfs_node->clk);
1489 }
1490 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1491
1492
1493 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1494 {
1495         clk_disable_unprepare(clk_dvfs_node->clk);
1496 }
1497 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
1498
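/* Public rate-setting entry point: forwards the request to the owning voltage domain's dvfs target under its mutex. */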
1499 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1500 {
1501         int ret = -EINVAL;
1502         
1503         if (!clk_dvfs_node)
1504                 return -EINVAL;
1505         
1506         DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n", 
1507                 __func__, clk_dvfs_node->name, rate);
1508         
1509         #if 0 /* disabled: capability check kept for reference, mirroring other rk dvfs entry points */
1510         if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1511                 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1512                 return ret;
1513         }
1514         #endif
1515
1516         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1517                 mutex_lock(&clk_dvfs_node->vd->mutex);
1518                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1519                 clk_dvfs_node->last_set_rate = rate;
1520                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1521         } else {
1522                 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n", 
1523                         __func__, clk_dvfs_node->name);
1524         }
1525                 
1526         return ret;     
1527 }
1528 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
1529
1530
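/* Register a voltage domain as a root of the global dvfs tree. */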
1531 int rk_regist_vd(struct vd_node *vd)
1532 {
1533         if (!vd)
1534                 return -EINVAL;
1535
1536         vd->mode_flag = 0;
1537         vd->volt_time_flag = 0;
1538         vd->n_voltages = 0;
1539         INIT_LIST_HEAD(&vd->pd_list);
1540         mutex_lock(&rk_dvfs_mutex);
1541         list_add(&vd->node, &rk_dvfs_tree);
1542         mutex_unlock(&rk_dvfs_mutex);
1543
1544         return 0;
1545 }
1546 EXPORT_SYMBOL_GPL(rk_regist_vd);
1547
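/* Attach a power domain to its parent voltage domain. */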
1548 int rk_regist_pd(struct pd_node *pd)
1549 {
1550         struct vd_node  *vd;
1551
1552         if (!pd)
1553                 return -EINVAL;
1554
1555         vd = pd->vd;
1556         if (!vd)
1557                 return -EINVAL;
1558
1559         INIT_LIST_HEAD(&pd->clk_list);
1560         mutex_lock(&vd->mutex);
1561         list_add(&pd->node, &vd->pd_list);
1562         mutex_unlock(&vd->mutex);
1563         
1564         return 0;
1565 }
1566 EXPORT_SYMBOL_GPL(rk_regist_pd);
1567
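/* Attach a clock dvfs node to its parent power domain, under the voltage domain's lock. */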
1568 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1569 {
1570         struct vd_node  *vd;
1571         struct pd_node  *pd;
1572
1573         if (!clk_dvfs_node)
1574                 return -EINVAL;
1575
1576         vd = clk_dvfs_node->vd;
1577         pd = clk_dvfs_node->pd;
1578         if (!vd || !pd)
1579                 return -EINVAL;
1580
1581         mutex_lock(&vd->mutex);
1582         list_add(&clk_dvfs_node->node, &pd->clk_list);
1583         mutex_unlock(&vd->mutex);
1584         
1585         return 0;
1586 }
1587 EXPORT_SYMBOL_GPL(rk_regist_clk);
1588
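/* For every entry in the node's cpufreq table, store the OPP voltage (in uV) in the .index field. */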
1589 static int rk_convert_cpufreq_table(struct dvfs_node *dvfs_node)
1590 {
1591         struct opp *opp;
1592         struct device *dev;
1593         struct cpufreq_frequency_table *table;
1594         int i;
1595
1596         table = dvfs_node->dvfs_table;
1597         dev = &dvfs_node->dev;
1598
1599         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
1600                 opp = opp_find_freq_exact(dev, table[i].frequency * 1000, true);
1601                 if (IS_ERR(opp))
1602                         return PTR_ERR(opp);
1603                 table[i].index = opp_get_voltage(opp);
1604         }
1605         return 0;
1606 }
1607
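/*
 * Parse a DT property made of <value frequency> pairs into a CPUFREQ_TABLE_END
 * terminated cpufreq table: the first cell of each pair goes to .index and the
 * second, multiplied by 1000, to .frequency. The property names suggest the
 * .index value is a temperature threshold.
 */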
1608 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1609 {
1610         struct cpufreq_frequency_table *temp_limt_table = NULL;
1611         const struct property *prop;
1612         const __be32 *val;
1613         int nr, i;
1614
1615         prop = of_find_property(dev_node, propname, NULL);
1616         if (!prop)
1617                 return NULL;
1618         if (!prop->value)
1619                 return NULL;
1620
1621         nr = prop->length / sizeof(u32);
1622         if (nr % 2) {
1623                 pr_err("%s: Invalid freq list\n", __func__);
1624                 return NULL;
1625         }
1626
1627         temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1628                              (nr/2 + 1), GFP_KERNEL);
        if (!temp_limt_table)
                return NULL;
1629
1630         val = prop->value;
1631
1632         for (i = 0; i < nr/2; i++) {
1633                 temp_limt_table[i].index = be32_to_cpup(val++);
1634                 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
1635         }
1636
1637         temp_limt_table[i].index = 0;
1638         temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1639
1640         return temp_limt_table;
1641
1642 }
1643
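/*
 * Build the dvfs tree from the "dvfs" device-tree node: each child is a
 * voltage domain, its children are power domains, and their children are the
 * individual clocks, for which OPP/cpufreq tables, regulator modes and
 * optional temperature limits are loaded.
 */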
1644 int of_dvfs_init(void)
1645 {
1646         struct vd_node *vd;
1647         struct pd_node *pd;
1648         struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
1649         struct dvfs_node *dvfs_node;
1650         struct clk *clk;
1651         const __be32 *val;
1652         int ret;
1653
1654         DVFS_DBG("%s\n", __func__);
1655
1656         dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
1657         if (IS_ERR_OR_NULL(dvfs_dev_node)) {
1658                 DVFS_ERR("%s get dvfs dev node err\n", __func__);
1659                 return -ENODEV;
1660         }
1661
1662         for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
1663                 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
1664                 if (!vd)
1665                         return -ENOMEM;
1666
1667                 mutex_init(&vd->mutex);
1668                 vd->name = vd_dev_node->name;
1669                 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
1670                 if (ret) {
1671                         DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n", 
1672                                 __func__, vd_dev_node->name, ret);
1673                         kfree(vd);
1674                         continue;
1675                 }
1676                 
1677                 vd->suspend_volt = 0;
1678                 
1679                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1680                 vd->vd_dvfs_target = dvfs_target;
1681                 ret = rk_regist_vd(vd);
1682                 if (ret){
1683                         DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
1684                         kfree(vd);
1685                         continue;
1686                 }
1687
1688                 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n", 
1689                         __func__, vd->name, vd->regulator_name, vd->suspend_volt);
1690                 
1691                 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {            
1692                         pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
1693                         if (!pd)
1694                                 return -ENOMEM;
1695
1696                         pd->vd = vd;
1697                         pd->name = pd_dev_node->name;
1698                         
1699                         ret = rk_regist_pd(pd);
1700                         if (ret){
1701                                 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
1702                                 kfree(pd);
1703                                 continue;
1704                         }
1705                         DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n", 
1706                                 __func__, pd->name, vd->name);                  
1707                         for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
1708                                 if (!of_device_is_available(clk_dev_node))
1709                                         continue;
1710                                 
1711                                 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
1712                                 if (!dvfs_node)
1713                                         return -ENOMEM;
1714                                 
1715                                 dvfs_node->name = clk_dev_node->name;
1716                                 dvfs_node->pd = pd;
1717                                 dvfs_node->vd = vd;
1718
1719                                 val = of_get_property(clk_dev_node, "regu-mode-en", NULL);
1720                                 if (val)
1721                                         dvfs_node->regu_mode_en = be32_to_cpup(val);
1722                                 if (dvfs_node->regu_mode_en)
1723                                         dvfs_node->regu_mode_table = of_get_regu_mode_table(clk_dev_node);
1724                                 else
1725                                         dvfs_node->regu_mode_table = NULL;
1726
1727                                 val = of_get_property(clk_dev_node, "temp-limit-enable", NULL);
1728                                 if (val)
1729                                         temp_limit_enable = be32_to_cpup(val);
1730                                 if (temp_limit_enable) {
1731                                         val = of_get_property(clk_dev_node, "target-temp", NULL);
1732                                         if (val)
1733                                                 target_temp = be32_to_cpup(val);
1734                                         val = of_get_property(clk_dev_node, "temp-channel", NULL);
1735                                         if (val)
1736                                                 dvfs_node->temp_channel = be32_to_cpup(val);
1737
1738                                         dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
1739                                         dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
1740                                         dvfs_node->virt_temp_limit_table[0] =
1741                                                 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-1-cpu-busy");
1742                                         dvfs_node->virt_temp_limit_table[1] =
1743                                                 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-2-cpu-busy");
1744                                         dvfs_node->virt_temp_limit_table[2] =
1745                                                 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-3-cpu-busy");
1746                                         dvfs_node->virt_temp_limit_table[3] =
1747                                                 of_get_temp_limit_table(clk_dev_node, "virt-temp-limit-4-cpu-busy");
1748                                 }
1749                                 dvfs_node->temp_limit_rate = -1;
1750                                 dvfs_node->dev.of_node = clk_dev_node;
1751                                 ret = of_init_opp_table(&dvfs_node->dev);
1752                                 if (ret) {
1753                                         DVFS_ERR("%s:clk(%s) get opp table err:%d\n", __func__, dvfs_node->name, ret);
1754                                         kfree(dvfs_node);
1755                                         continue;
1756                                 }
1757                                 
1758                                 ret = opp_init_cpufreq_table(&dvfs_node->dev, &dvfs_node->dvfs_table);
1759                                 if (ret) {
1760                                         DVFS_ERR("%s:clk(%s) get cpufreq table err:%d\n", __func__, dvfs_node->name, ret);
1761                                         kfree(dvfs_node);
1762                                         continue;
1763                                 }
1764                                 ret = rk_convert_cpufreq_table(dvfs_node);
1765                                 if (ret) {
1766                                         kfree(dvfs_node);
1767                                         continue;
1768                                 }
1769                                 
1770                                 clk = clk_get(NULL, clk_dev_node->name);
1771                                 if (IS_ERR(clk)){
1772                                         DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
1773                                         kfree(dvfs_node);
1774                                         continue;
1775                                         
1776                                 }
1777                                 
1778                                 dvfs_node->clk = clk;
1779                                 ret = rk_regist_clk(dvfs_node);
1780                                 if (ret){
1781                                         DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
1782                                         return ret;
1783                                 }
1784
1785                                 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n", 
1786                                         __func__, clk_dev_node->name, pd->name);        
1787
1788                         }
1789                 }       
1790         }
1791         return 0;
1792 }
1793
1794 /*********************************************************************************/
1795 /**
1796  * dump_dbg_map() : dump the whole dvfs tree (voltage domains, power domains, clocks) for debugging
1797  */
1798 static int dump_dbg_map(char *buf)
1799 {
1800         int i;
1801         struct vd_node  *vd;
1802         struct pd_node  *pd;
1803         struct dvfs_node        *clk_dvfs_node;
1804         char *s = buf;
1805         
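        /*
         * Note: the tree is emitted with printk() to the kernel log; nothing is
         * written into the sysfs buffer, so the read returns s - buf == 0 bytes.
         */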
1806         mutex_lock(&rk_dvfs_mutex);
1807         printk( "-------------DVFS TREE-----------\n\n\n");
1808         printk( "DVFS TREE:\n");
1809
1810         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1811                 mutex_lock(&vd->mutex);
1812                 printk( "|\n|- voltage domain:%s\n", vd->name);
1813                 printk( "|- current voltage:%d\n", vd->cur_volt);
1814                 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
1815
1816                 list_for_each_entry(pd, &vd->pd_list, node) {
1817                         printk( "|  |\n|  |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
1818                                         pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
1819                                         dvfs_regu_mode_to_string(pd->regu_mode));
1820
1821                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1822                                 printk( "|  |  |\n|  |  |- clock: %s current: rate %d, volt = %d,"
1823                                                 " enable_dvfs = %s\n",
1824                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
1825                                                 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
1826                                 printk( "|  |  |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
1827                                                 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
1828                                                 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
1829                                                 clk_dvfs_node->last_set_rate/1000);
1830                                 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1831                                         printk( "|  |  |  |- freq = %d, volt = %d\n",
1832                                                         clk_dvfs_node->dvfs_table[i].frequency,
1833                                                         clk_dvfs_node->dvfs_table[i].index);
1834
1835                                 }
1836                                 printk( "|  |  |- clock: %s current: rate %d, regu_mode = %s,"
1837                                                 " regu_mode_en = %d\n",
1838                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq,
1839                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
1840                                                 clk_dvfs_node->regu_mode_en);
1841                                 if (clk_dvfs_node->regu_mode_table) {
1842                                         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1843                                                 printk( "|  |  |  |- freq = %d, regu_mode = %s\n",
1844                                                                 clk_dvfs_node->regu_mode_table[i].frequency/1000,
1845                                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
1846                                         }
1847                                 }
1848                         }
1849                 }
1850                 mutex_unlock(&vd->mutex);
1851         }
1852         
1853         printk( "-------------DVFS TREE END------------\n");
1854         mutex_unlock(&rk_dvfs_mutex);
1855         
1856         return s - buf;
1857 }
1858
1859 /*********************************************************************************/
1860 static struct kobject *dvfs_kobj;
1861 struct dvfs_attribute {
1862         struct attribute        attr;
1863         ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
1864                         char *buf);
1865         ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
1866                         const char *buf, size_t n);
1867 };
1868
1869 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
1870                const char *buf, size_t n)
1871 {
1872        return n;
1873 }
1874 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
1875                char *buf)
1876 {
1877        return dump_dbg_map(buf);
1878 }
1879
1880
1881 static struct dvfs_attribute dvfs_attrs[] = {
1882         /*     node_name        permission              show_func       store_func */
1883 //#ifdef CONFIG_RK_CLOCK_PROC
1884         __ATTR(dvfs_tree,       S_IRUSR | S_IRGRP | S_IWUSR,    dvfs_tree_show, dvfs_tree_store),
1885 //#endif
1886 };
1887
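/*
 * Late init: expose /sys/dvfs/dvfs_tree, start the periodic temperature-limit
 * work for the CPU clock when enabled, and tie the vdd_gpu regulator to the
 * pd_gpu power-domain, framebuffer (early-suspend) and reboot notifiers.
 */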
1888 static int __init dvfs_init(void)
1889 {
1890         int i, ret = 0;
1891
1892         dvfs_kobj = kobject_create_and_add("dvfs", NULL);
1893         if (!dvfs_kobj)
1894                 return -ENOMEM;
1895         for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
1896                 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
1897                 if (ret != 0) {
1898                         DVFS_ERR("create index %d error\n", i);
1899                         return ret;
1900                 }
1901         }
1902
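        /* Thermal limiting: run dvfs_temp_limit_work on CPU0 from a dedicated high-priority workqueue. */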
1903         if (temp_limit_enable) {
1904                 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
1905                 if (!clk_cpu_dvfs_node){
1906                         return -EINVAL;
1907                 }
1908
1909                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1910                 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
1911                 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
1912         }
1913
1914         vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
1915         if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
1916                 struct clk *clk = clk_get(NULL, "pd_gpu");
1917
1918                 if (!IS_ERR_OR_NULL(clk))
1919                         rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
1920
1921                 fb_register_client(&early_suspend_notifier);
1922                 register_reboot_notifier(&vdd_gpu_reboot_notifier);
1923         }
1924
1925         return ret;
1926 }
1927
1928 late_initcall(dvfs_init);