rockchip: dvfs: switch regulator mode dynamically as rate changes
[firefly-linux-kernel-4.4.55.git] / arch/arm/mach-rockchip/dvfs.c
1 /* arch/arm/mach-rockchip/dvfs.c
2  *
3  * Copyright (C) 2012 ROCKCHIP, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
20 #include <linux/of.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
24 #include <linux/fb.h>
25 #include <linux/reboot.h>
26 #include "../../../drivers/clk/rockchip/clk-pd.h"
27
28 extern int rockchip_tsadc_get_temp(int chn);
29
30 #define MHz     (1000 * 1000)
31 static LIST_HEAD(rk_dvfs_tree);
32 static DEFINE_MUTEX(rk_dvfs_mutex);
33 static struct workqueue_struct *dvfs_wq;
34 static struct dvfs_node *clk_cpu_dvfs_node;
35 static unsigned int target_temp = 80;
36 static int temp_limit_enable = 1;
37
38 static int pd_gpu_off, early_suspend;
39 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
40 struct regulator *vdd_gpu_regulator;
41
42 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
43         unsigned long event, void *ptr)
44 {
45         int ret;
46
47         DVFS_DBG("%s: enable vdd_gpu\n", __func__);
48         mutex_lock(&switch_vdd_gpu_mutex);
49         if (!regulator_is_enabled(vdd_gpu_regulator))
50                 ret = regulator_enable(vdd_gpu_regulator);
51         mutex_unlock(&switch_vdd_gpu_mutex);
52
53         return NOTIFY_OK;
54 }
55
56 static struct notifier_block vdd_gpu_reboot_notifier = {
57         .notifier_call = vdd_gpu_reboot_notifier_event,
58 };
59
60 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
61         unsigned long event, void *ptr)
62 {
63         int ret;
64
65         switch (event) {
66         case RK_CLK_PD_PREPARE:
67                 mutex_lock(&switch_vdd_gpu_mutex);
68                 pd_gpu_off = 0;
69                 if (early_suspend) {
70                         if (!regulator_is_enabled(vdd_gpu_regulator))
71                                 ret = regulator_enable(vdd_gpu_regulator);
72                 }
73                 mutex_unlock(&switch_vdd_gpu_mutex);
74                 break;
75         case RK_CLK_PD_UNPREPARE:
76                 mutex_lock(&switch_vdd_gpu_mutex);
77                 pd_gpu_off = 1;
78                 if (early_suspend) {
79                         if (regulator_is_enabled(vdd_gpu_regulator))
80                                 ret = regulator_disable(vdd_gpu_regulator);
81                 }
82                 mutex_unlock(&switch_vdd_gpu_mutex);
83                 break;
84         default:
85                 break;
86         }
87
88         return NOTIFY_OK;
89 }
90
91 static struct notifier_block clk_pd_gpu_notifier = {
92         .notifier_call = clk_pd_gpu_notifier_call,
93 };
94
95
96 static int early_suspend_notifier_call(struct notifier_block *self,
97                                 unsigned long action, void *data)
98 {
99         struct fb_event *event = data;
100         int blank_mode = *((int *)event->data);
101         int ret;
102
103         mutex_lock(&switch_vdd_gpu_mutex);
104         if (action == FB_EARLY_EVENT_BLANK) {
105                 switch (blank_mode) {
106                 case FB_BLANK_UNBLANK:
107                         early_suspend = 0;
108                         if (pd_gpu_off) {
109                                 if (!regulator_is_enabled(vdd_gpu_regulator))
110                                         ret = regulator_enable(
111                                         vdd_gpu_regulator);
112                         }
113                         break;
114                 default:
115                         break;
116                 }
117         } else if (action == FB_EVENT_BLANK) {
118                 switch (blank_mode) {
119                 case FB_BLANK_POWERDOWN:
120                         early_suspend = 1;
121                         if (pd_gpu_off) {
122                                 if (regulator_is_enabled(vdd_gpu_regulator))
123                                         ret = regulator_disable(
124                                         vdd_gpu_regulator);
125                         }
126
127                         break;
128                 default:
129                         break;
130                 }
131         }
132         mutex_unlock(&switch_vdd_gpu_mutex);
133
134         return NOTIFY_OK;
135 }
136
137 static struct notifier_block early_suspend_notifier = {
138                 .notifier_call = early_suspend_notifier_call,
139 };
140
141 #define DVFS_REGULATOR_MODE_STANDBY     1
142 #define DVFS_REGULATOR_MODE_IDLE        2
143 #define DVFS_REGULATOR_MODE_NORMAL      3
144 #define DVFS_REGULATOR_MODE_FAST        4
145
146 static const char* dvfs_regu_mode_to_string(unsigned int mode)
147 {
148         switch (mode) {
149         case DVFS_REGULATOR_MODE_FAST:
150                 return "FAST";
151         case DVFS_REGULATOR_MODE_NORMAL:
152                 return "NORMAL";
153         case DVFS_REGULATOR_MODE_IDLE:
154                 return "IDLE";
155         case DVFS_REGULATOR_MODE_STANDBY:
156                 return "STANDBY";
157         default:
158                 return "UNKNOWN";
159         }
160 }
161
162 static int dvfs_regu_mode_convert(unsigned int mode)
163 {
164         switch (mode) {
165         case DVFS_REGULATOR_MODE_FAST:
166                 return REGULATOR_MODE_FAST;
167         case DVFS_REGULATOR_MODE_NORMAL:
168                 return REGULATOR_MODE_NORMAL;
169         case DVFS_REGULATOR_MODE_IDLE:
170                 return REGULATOR_MODE_IDLE;
171         case DVFS_REGULATOR_MODE_STANDBY:
172                 return REGULATOR_MODE_STANDBY;
173         default:
174                 return -EINVAL;
175         }
176 }
177
178 static int dvfs_regu_mode_deconvert(unsigned int mode)
179 {
180         switch (mode) {
181         case REGULATOR_MODE_FAST:
182                 return DVFS_REGULATOR_MODE_FAST;
183         case REGULATOR_MODE_NORMAL:
184                 return DVFS_REGULATOR_MODE_NORMAL;
185         case REGULATOR_MODE_IDLE:
186                 return DVFS_REGULATOR_MODE_IDLE;
187         case REGULATOR_MODE_STANDBY:
188                 return DVFS_REGULATOR_MODE_STANDBY;
189         default:
190                 return -EINVAL;
191         }
192 }
193
194 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
195 {
196         struct cpufreq_frequency_table *regu_mode_table = NULL;
197         const struct property *prop;
198         const __be32 *val;
199         int nr, i;
200
201         prop = of_find_property(dev_node, "regu-mode-table", NULL);
202         if (!prop)
203                 return NULL;
204         if (!prop->value)
205                 return NULL;
206
207         nr = prop->length / sizeof(u32);
208         if (nr % 2) {
209                 pr_err("%s: Invalid freq list\n", __func__);
210                 return NULL;
211         }
212
213         regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
214                              (nr/2+1), GFP_KERNEL);
215         if (!regu_mode_table) {
216                 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
217                 return ERR_PTR(-ENOMEM);
218         }
219
220         val = prop->value;
221
222         for (i=0; i<nr/2; i++){
223                 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
224                 regu_mode_table[i].index = be32_to_cpup(val++);
225         }
226
227         if (regu_mode_table[i-1].frequency != 0) {
228                 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
229                 kfree(regu_mode_table);
230                 return NULL;
231         }
232
233         regu_mode_table[i].index = 0;
234         regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
235
236         return regu_mode_table;
237 }
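/*
 * Illustrative devicetree fragment for the "regu-mode-table" property parsed
 * above (hypothetical values, not taken from a real board file):
 *
 *     regu-mode-table = <
 *             816000  4       // >= 816 MHz (value in kHz): DVFS_REGULATOR_MODE_FAST
 *                  0  3       // all lower rates:           DVFS_REGULATOR_MODE_NORMAL
 *     >;
 *
 * Entries are <freq-kHz mode> pairs in descending frequency order; the last
 * pair must use frequency 0 as a catch-all, and an extra CPUFREQ_TABLE_END
 * entry is appended after it by this function.
 */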
238
239 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
240 {
241         int i, ret;
242         int mode, convert_mode, valid_mode;
243
244         if (!clk_dvfs_node)
245                 return -EINVAL;
246
247         if (!clk_dvfs_node->regu_mode_table)
248                 return -EINVAL;
249
250         if (!clk_dvfs_node->vd)
251                 return -EINVAL;
252
253         if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
254                 return -EINVAL;
255
256         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
257                 mode = clk_dvfs_node->regu_mode_table[i].index;
258                 convert_mode = dvfs_regu_mode_convert(mode);
259
260                 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
261                                                 &convert_mode);
262                 if (ret) {
263                         DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
264                                 mode);
265                         kfree(clk_dvfs_node->regu_mode_table);
266                         clk_dvfs_node->regu_mode_table = NULL;
267                         return ret;
268                 }
269
270                 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
271                 if (valid_mode != mode) {
272                         DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
273                                 __func__, mode, valid_mode);
274                         clk_dvfs_node->regu_mode_table[i].index = valid_mode;
275                 }
276
277         }
278
279         return 0;
280 }
281
282 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
283         unsigned long rate, unsigned int *mode)
284 {
285         int i;
286
287
288         if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
289                 return -EINVAL;
290
291         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
292                 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
293                         *mode = clk_dvfs_node->regu_mode_table[i].index;
294                         return 0;
295                 }
296         }
297
298         return -EINVAL;
299 }
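/*
 * With a descending table like the hypothetical one sketched above, a rate of
 * 1,008,000,000 Hz matches the 816000 kHz entry (FAST), while 600,000,000 Hz
 * falls through to the trailing 0 entry (NORMAL).
 */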
300
301 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
302 {
303         unsigned int mode_max = 0;
304
305
306         if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
307                 return clk_dvfs_node->regu_mode;
308         }
309
310         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
311                 if (clk_dvfs_node->regu_mode_en)
312                         mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
313         }
314
315         return mode_max;
316 }
317
318 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
319 {
320         struct pd_node *pd;
321
322         if (!clk_dvfs_node)
323                 return;
324
325         pd = clk_dvfs_node->pd;
326         if (!pd)
327                 return;
328
329         pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
330 }
331
332 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
333 {
334         unsigned int mode_max_vd = 0;
335         struct pd_node *pd;
336
337         if (!vd)
338                 return -EINVAL;
339
340         list_for_each_entry(pd, &vd->pd_list, node) {
341                 mode_max_vd = max(mode_max_vd, pd->regu_mode);
342         }
343
344         return mode_max_vd;
345 }
346
347 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
348 {
349         if (!clk_dvfs_node)
350                 return -EINVAL;
351
352         dvfs_update_clk_pds_mode(clk_dvfs_node);
353
354         return  dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
355 }
356
357 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
358 {
359         int convert_mode;
360         int ret = 0;
361
362
363         if (IS_ERR_OR_NULL(vd)) {
364                 DVFS_ERR("%s: vd_node error\n", __func__);
365                 return -EINVAL;
366         }
367
368         DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
369
370         convert_mode = dvfs_regu_mode_convert(mode);
371         if (convert_mode < 0) {
372                 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
373                 return convert_mode;
374         }
375
376         if (!IS_ERR_OR_NULL(vd->regulator)) {
377                 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
378                 if (ret < 0) {
379                         DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
380                                 vd->regulator_name, mode, vd->regu_mode);
381                         return -EAGAIN;
382                 }
383         } else {
384                 DVFS_ERR("%s: invalid regulator\n", __func__);
385                 return -EINVAL;
386         }
387
388         vd->regu_mode = mode;
389
390         return 0;
391 }
392
393 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
394 {
395         int ret;
396         int mode;
397
398
399         if (!clk_dvfs_node)
400                 return -EINVAL;
401
402         if (!clk_dvfs_node->regu_mode_en)
403                 return 0;
404
405         ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
406         if (ret) {
407                 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
408                         __func__, clk_dvfs_node->name, rate);
409                 return ret;
410         }
411         clk_dvfs_node->regu_mode = mode;
412
413         mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
414         if (mode < 0)
415                 return mode;
416
417         ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
418
419         return ret;
420 }
421
422 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
423 {
424         int u_time;
425         
426         if(new_volt <= old_volt)
427                 return;
428         if(vd->volt_time_flag > 0)      
429                 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
430         else
431                 u_time = -1;            
432         if (u_time < 0) { /* regulator does not report ramp time, use default */
433                 DVFS_DBG("%s: vd %s does not support getting delay time, using default\n",
434                                 __func__, vd->name);
435                 u_time = ((new_volt) - (old_volt)) >> 9;
436         }
437         
438         DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n", 
439                 __func__, vd->name, old_volt, new_volt, u_time);
440         
441         if (u_time >= 1000) {
442                 mdelay(u_time / 1000);
443                 udelay(u_time % 1000);
444                 DVFS_WARNING("%s: regulator set voltage delay is larger than 1ms, old is %d, new is %d\n",
445                         __func__, old_volt, new_volt);
446         } else if (u_time) {
447                 udelay(u_time);
448         }                       
449 }
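/*
 * Worked example of the fallback delay above (illustrative figures): ramping
 * vdd from 1,000,000 uV to 1,100,000 uV on a regulator that does not report
 * ramp time gives (1100000 - 1000000) >> 9 = 195 us, i.e. the fallback
 * assumes a slew rate of roughly 512 uV per us.
 */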
450
451 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
452 {
453         int ret = 0, read_back = 0;
454         
455         ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
456         if (ret < 0) {
457                 DVFS_ERR("%s: set voltage failed, read back to check whether it took effect\n", __func__);
458
459                 /* read back to judge if it is already effect */
460                 mdelay(2);
461                 read_back = dvfs_regulator_get_voltage(regulator);
462                 if (read_back == max_uV) {
463                         DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
464                         ret = 0;
465                 } else {
466                         DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
467                 }
468         }
469         
470         return ret;
471 }
472
473 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
474 {
475         int ret = 0;
476         
477         DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
478         
479         if (IS_ERR_OR_NULL(vd_clk)) {
480                 DVFS_ERR("%s: vd_node error\n", __func__);
481                 return -EINVAL;
482         }
483
484         if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
485                 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
486                 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
487                 if (ret < 0) {
488                         vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
489                         DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
490                                         __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
491                         return -EAGAIN;
492                 }
493
494         } else {
495                 DVFS_ERR("%s: invalid regulator\n", __func__);
496                 return -EINVAL;
497         }
498
499         vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
500         vd_clk->cur_volt = volt_new;
501
502         return 0;
503
504 }
505
506 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
507 {
508         int flag_set_volt_correct = 0;
509         if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
510                 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
511         else {
512                 DVFS_ERR("%s: invalid regulator\n", __func__);
513                 return -EINVAL;
514         }
515         if (flag_set_volt_correct <= 0) {
516                 DVFS_ERR("%s (vd:%s): tried to reload volt but read-back failed again (%d)!!! stop scaling\n",
517                                 __func__, dvfs_vd->name, flag_set_volt_correct);
518                 return -EAGAIN;
519         }
520         dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
521         DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
522                         __func__, dvfs_vd->name, flag_set_volt_correct);
523
524         /* Reset vd's voltage */
525         dvfs_vd->cur_volt = flag_set_volt_correct;
526
527         return dvfs_vd->cur_volt;
528 }
529
530
531 /* for the clk-enable path: read back the vd regulator's current voltage */
532 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
533 {
534         vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
535         if (vd->cur_volt <= 0)
536                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
537         else
538                 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
539 }
540
541 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
542 {
543         unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
544         int n = 0, sel_volt = 0;
545         
546         if(selector > VD_VOL_LIST_CNT)
547                 selector = VD_VOL_LIST_CNT;
548
549         for (i = 0; i < selector; i++) {
550                 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
551                 if(sel_volt <= 0){      
552                         //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
553                         //      __func__, vd->name, i, sel_volt);
554                         continue;
555                 }
556                 vd->volt_list[n++] = sel_volt;  
557                 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n", 
558                         __func__, vd->name, i, n, sel_volt);
559         }
560         
561         vd->n_voltages = n;
562 }
563
564 /* round up: return the smallest listed voltage >= volt */
565 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
566 {
567         int sel_volt;
568         int i;
569         
570         for (i = 0; i < vd->n_voltages; i++) {
571                 sel_volt = vd->volt_list[i];
572                 if(sel_volt <= 0){      
573                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
574                                 __func__, i);
575                         continue;
576                 }
577                 if(sel_volt >= volt)
578                         return sel_volt;        
579         }
580         return -EINVAL;
581 }
582
583 /* round down: return the largest listed voltage <= volt */
584 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
585 {
586         int sel_volt;
587         int i;
588         
589         for (i = 0; i < vd->n_voltages; i++) {
590                 sel_volt = vd->volt_list[i];
591                 if(sel_volt <= 0){      
592                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
593                                 __func__, i);
594                         continue;
595                 }
596                 if(sel_volt > volt){
597                         if(i > 0)
598                                 return vd->volt_list[i-1];
599                         else
600                                 return -EINVAL;
601                 }       
602         }
603         
604         return -EINVAL;
605 }
606
607 /* round volt against the regulator's list: VD_LIST_RELATION_L rounds down, anything else rounds up */
608 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
609 {
610         if(!vd->n_voltages)
611                 return -EINVAL;
612         if(flags == VD_LIST_RELATION_L)
613                 return vd_regulator_round_volt_min(vd, volt);
614         else
615                 return vd_regulator_round_volt_max(vd, volt);   
616 }
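/*
 * Illustrative rounding example (hypothetical ascending volt_list, in uV):
 * with volt_list = { 900000, 950000, 1000000, 1100000 } and a request of
 * 975000, VD_LIST_RELATION_L rounds down to 950000, while any other flag
 * rounds up to 1000000.
 */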
617
618 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
619 {
620         int i, test_volt;
621
622         if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd || 
623                 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
624                 return;
625
626         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
627
628                 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
629                 if(test_volt <= 0)
630                 {       
631                         DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
632                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
633                         break;
634                 }
635                 DVFS_DBG("clk %s:round_volt %d to %d\n",
636                         clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
637                 
638                 clk_dvfs_node->dvfs_table[i].index=test_volt;           
639         }
640 }
641
642 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
643 {
644         if (vd->volt_time_flag <= 0) { /* check whether the regulator reports voltage ramp time */
645                 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
646                 if (vd->volt_time_flag < 0) {
647                         DVFS_DBG("%s: vd %s does not report volt ramp time\n",
648                                 __func__, vd->name);
649                 }
650                 else {
651                         DVFS_DBG("%s: vd %s reports volt ramp time, a 200mV step needs %d us\n",
652                                 __func__, vd->name, vd->volt_time_flag);
653                 }
654         }
655 }
656 #if 0
657 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
658 {
659         //REGULATOR_MODE_FAST
660         if(vd->mode_flag <= 0){// check regulator support get uping vol timer{
661                 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
662                 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
663                         || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
664                         
665                         if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
666                                 vd->mode_flag = 0;// check again
667                         }
668                 }
669                 if(vd->mode_flag > 0){
670                         DVFS_DBG("%s,vd %s mode(now is %d) support\n",
671                                 __func__, vd->name, vd->mode_flag);
672                 }
673                 else{
674                         DVFS_DBG("%s,vd %s mode is not support now check\n",
675                                 __func__, vd->name);
676                 }
677         }
678 }
679 #endif
680
681 struct regulator *dvfs_get_regulator(char *regulator_name) 
682 {
683         struct vd_node *vd;
684
685         mutex_lock(&rk_dvfs_mutex);
686         list_for_each_entry(vd, &rk_dvfs_tree, node) {
687                 if (strcmp(regulator_name, vd->regulator_name) == 0) {
688                         mutex_unlock(&rk_dvfs_mutex);
689                         return vd->regulator;
690                 }
691         }
692         mutex_unlock(&rk_dvfs_mutex);
693         return NULL;
694 }
695
696 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
697 {
698         struct cpufreq_frequency_table *table;
699         int i = 0;
700
701         if (!clk_dvfs_node)
702                 return -EINVAL;
703
704         clk_dvfs_node->min_rate = 0;
705         clk_dvfs_node->max_rate = 0;
706
707         table = clk_dvfs_node->dvfs_table;
708         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
709                 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
710                 if (i == 0)
711                         clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
712         }
713
714         DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
715                         __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
716
717         return 0;
718 }
719
720 static void dvfs_table_round_clk_rate(struct dvfs_node  *clk_dvfs_node)
721 {
722         int i, rate, temp_rate, flags;
723         
724         if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
725                 return;
726
727         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
728                 /* ddr rate encoding: frequency (kHz) = real rate + flags in the low three digits */
729                 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
730                 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
731                 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
732                 if(temp_rate <= 0){     
733                         DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
734                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
735                         continue;
736                 }
737                 
738                 /* Set rate unit as MHZ */
739                 if (temp_rate % MHz != 0)
740                         temp_rate = (temp_rate / MHz + 1) * MHz;
741
742                 temp_rate = (temp_rate / 1000) + flags;
743                 
744                 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
745                         clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
746                 
747                 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;             
748         }
749 }
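/*
 * Worked example of the kHz+flags encoding handled above (illustrative): a
 * table entry of 300003 means 300000 kHz with flags = 3.  If the clock
 * framework rounds 300 MHz to 297.5 MHz, the rate is bumped up to the next
 * whole MHz (298 MHz) and the flags are re-attached, giving 298003.
 */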
750
751 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
752                 struct cpufreq_frequency_table *clk_fv)
753 {
754         int i = 0;
755         
756         if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
757                 /* since no need */
758                 return -EINVAL;
759         }
760         clk_fv->frequency = rate_khz;
761         clk_fv->index = 0;
762
763         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
764                 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
765                         clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
766                         clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
767                          //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
768                          //clk_fv->frequency, clk_fv->index);
769                         return 0;
770                 }
771         }
772         clk_fv->frequency = 0;
773         clk_fv->index = 0;
774         //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
775         return -EINVAL;
776 }
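/*
 * Illustrative lookup (hypothetical ascending dvfs_table, .frequency in kHz,
 * .index reused as uV):
 *   { 408000, 900000 }, { 816000, 1000000 }, { 1200000, 1200000 }, END
 * A request of 600000 kHz matches the first entry with frequency >= 600000,
 * i.e. 816000 kHz at 1000000 uV; a request above 1200000 kHz falls off the
 * end and returns -EINVAL.
 */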
777
778 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
779 {
780         int volt_max = 0;
781
782         if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
783                 return clk_dvfs_node->set_volt;
784         }
785
786         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
787                 if (clk_dvfs_node->enable_count)
788                         volt_max = max(volt_max, clk_dvfs_node->set_volt);
789         }
790         return volt_max;
791 }
792
793 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
794 {
795         struct pd_node *pd;
796         
797         if (!clk_dvfs_node)
798                 return;
799         
800         pd = clk_dvfs_node->pd;
801         if (!pd)
802                 return;
803         
804         pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
805 }
806
807 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
808 {
809         int volt_max_vd = 0;
810         struct pd_node *pd;
811
812         if (!vd)
813                 return -EINVAL;
814         
815         list_for_each_entry(pd, &vd->pd_list, node) {
816                 volt_max_vd = max(volt_max_vd, pd->cur_volt);
817         }
818
819         return volt_max_vd;
820 }
821
822 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
823 {
824         if (!clk_dvfs_node)
825                 return -EINVAL;
826
827         dvfs_update_clk_pds_volt(clk_dvfs_node);
828         return  dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
829 }
830
831 #if 0
832 static void dvfs_temp_limit_work_func(struct work_struct *work)
833 {
834         unsigned long delay = HZ / 10; // 100ms
835         struct vd_node *vd;
836         struct pd_node *pd;
837         struct dvfs_node *clk_dvfs_node;
838
839         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
840
841         mutex_lock(&rk_dvfs_mutex);
842         list_for_each_entry(vd, &rk_dvfs_tree, node) {
843                 mutex_lock(&vd->mutex);
844                 list_for_each_entry(pd, &vd->pd_list, node) {
845                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
846                                 if (clk_dvfs_node->temp_limit_table) {
847                                         clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
848                                         clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
849                                 }
850                         }
851                 }
852                 mutex_unlock(&vd->mutex);
853         }
854         mutex_unlock(&rk_dvfs_mutex);
855 }
856 #endif
857
858 static void dvfs_temp_limit_work_func(struct work_struct *work)
859 {
860         int temp=0, delta_temp=0;
861         unsigned long delay = HZ/10;
862         unsigned long arm_rate_step=0;
863         static int old_temp=0;
864         int i;
865
866         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
867
868         temp = rockchip_tsadc_get_temp(1);
869
870         //debounce
871         delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
872         if (delta_temp <= 1)
873                 return;
874
875         if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
876                 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
877                         return;
878                 }
879
880                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
881                 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
882                         if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
883                                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
884                         }
885                 }
886                 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
887         } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
888                 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
889                         return;
890                 }
891
892                 if (temp > target_temp) {
893                         if (temp > old_temp) {
894                                 delta_temp = temp - target_temp;
895                                 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
896                                         if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
897                                                 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
898                                         }
899                                 }
900                                 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
901                                         clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
902                                         dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
903                                 }
904                         }
905                 } else {
906                         if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
907                                 delta_temp = target_temp - temp;
908                                 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
909                                         if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
910                                                 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
911                                         }
912                                 }
913
914                                 if (arm_rate_step) {
915                                         clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
916                                         if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
917                                                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
918                                         }
919                                         dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
920                                 }
921                         }
922                 }
923         }
924
925         DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
926
927         old_temp = temp;
928 }
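/*
 * Illustrative NORMAL-policy step (hypothetical nor_temp_limit_table): with
 * target_temp = 80 and entries using .index thresholds 0 and 5, a reading of
 * 87 C gives delta_temp = 7, so the loop above ends on the .index = 5 entry
 * and temp_limit_rate is lowered by that entry's frequency step; once
 * readings drop back below 80 C the same table is used to raise
 * temp_limit_rate towards max_rate again.
 */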
929 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
930
931
932 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
933 {
934         u32 rate = 0, ret = 0;
935
936         if (!clk_dvfs_node || (min_rate > max_rate))
937                 return -EINVAL;
938         
939         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
940                 mutex_lock(&clk_dvfs_node->vd->mutex);
941                 
942                 /* To reset clk_dvfs_node->min_rate/max_rate */
943                 dvfs_get_rate_range(clk_dvfs_node);
944                 clk_dvfs_node->freq_limit_en = 1;
945
946                 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
947                         clk_dvfs_node->min_rate = min_rate;
948                 }
949                 
950                 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
951                         clk_dvfs_node->max_rate = max_rate;
952                 }
953
954                 if (clk_dvfs_node->last_set_rate == 0)
955                         rate = __clk_get_rate(clk_dvfs_node->clk);
956                 else
957                         rate = clk_dvfs_node->last_set_rate;
958                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
959
960                 mutex_unlock(&clk_dvfs_node->vd->mutex);
961
962         }
963
964         DVFS_DBG("%s:clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
965                         __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, 
966                         clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
967
968         return 0;
969 }
970 EXPORT_SYMBOL(dvfs_clk_enable_limit);
971
972 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
973 {
974         u32 ret = 0;
975
976         if (!clk_dvfs_node)
977                 return -EINVAL;
978         
979         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
980                 mutex_lock(&clk_dvfs_node->vd->mutex);
981                 
982                 /* To reset clk_dvfs_node->min_rate/max_rate */
983                 dvfs_get_rate_range(clk_dvfs_node);
984                 clk_dvfs_node->freq_limit_en = 0;
985                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
986
987                 mutex_unlock(&clk_dvfs_node->vd->mutex);
988         }
989
990         DVFS_DBG("%s: clk(%s) last_set_rate=%u; [min_rate, max_rate]=[%u, %u]\n",
991                         __func__, __clk_get_name(clk_dvfs_node->clk), clk_dvfs_node->last_set_rate, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
992         return 0;
993 }
994 EXPORT_SYMBOL(dvfs_clk_disable_limit);
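/*
 * Illustrative use of the limit API (hypothetical caller, clock name and
 * rates):
 *
 *     struct dvfs_node *gpu = clk_get_dvfs_node("clk_gpu");
 *
 *     if (gpu) {
 *             dvfs_clk_enable_limit(gpu, 200 * MHz, 400 * MHz);
 *             ... thermally constrained period ...
 *             dvfs_clk_disable_limit(gpu);    // restores table min/max
 *     }
 */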
995
996 void dvfs_disable_temp_limit(void) {
997         temp_limit_enable = 0;
998         cancel_delayed_work_sync(&dvfs_temp_limit_work);
999 }
1000
1001 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate) 
1002 {
1003         int freq_limit_en;
1004
1005         if (!clk_dvfs_node)
1006                 return -EINVAL;
1007
1008         mutex_lock(&clk_dvfs_node->vd->mutex);
1009
1010         *min_rate = clk_dvfs_node->min_rate;
1011         *max_rate = clk_dvfs_node->max_rate;
1012         freq_limit_en = clk_dvfs_node->freq_limit_en;
1013
1014         mutex_unlock(&clk_dvfs_node->vd->mutex);
1015
1016         return freq_limit_en;
1017 }
1018 EXPORT_SYMBOL(dvfs_clk_get_limit);
1019
1020 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1021 {
1022         if (!clk_dvfs_node)
1023                 return -EINVAL;
1024                         
1025         mutex_lock(&clk_dvfs_node->vd->mutex);
1026         clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1027         mutex_unlock(&clk_dvfs_node->vd->mutex);
1028
1029         return 0;
1030 }
1031 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
1032
1033 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node) 
1034 {
1035         struct cpufreq_frequency_table *table;
1036
1037         if (!clk_dvfs_node)
1038                 return NULL;
1039
1040         mutex_lock(&clk_dvfs_node->vd->mutex);
1041         table = clk_dvfs_node->dvfs_table;
1042         mutex_unlock(&clk_dvfs_node->vd->mutex);
1043         
1044         return table;
1045 }
1046 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
1047
1048 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1049 {
1050         if (!clk_dvfs_node)
1051                 return -EINVAL;
1052
1053         if (IS_ERR_OR_NULL(table)){
1054                 DVFS_ERR("%s:invalid table!\n", __func__);
1055                 return -EINVAL;
1056         }
1057         
1058         mutex_lock(&clk_dvfs_node->vd->mutex);
1059         clk_dvfs_node->dvfs_table = table;
1060         dvfs_get_rate_range(clk_dvfs_node);
1061         dvfs_table_round_clk_rate(clk_dvfs_node);
1062         dvfs_table_round_volt(clk_dvfs_node);
1063         mutex_unlock(&clk_dvfs_node->vd->mutex);
1064
1065         return 0;
1066 }
1067 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
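/*
 * Illustrative frequency/voltage table for dvfs_set_freq_volt_table()
 * (hypothetical values; .frequency in kHz, .index reused as uV):
 *
 *     static struct cpufreq_frequency_table demo_dvfs_table[] = {
 *             { .frequency = 200000, .index =  950000 },
 *             { .frequency = 400000, .index = 1100000 },
 *             { .frequency = CPUFREQ_TABLE_END },
 *     };
 *
 *     dvfs_set_freq_volt_table(clk_dvfs_node, demo_dvfs_table);
 */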
1068
1069 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1070 {
1071         struct cpufreq_frequency_table clk_fv;
1072         int volt_new;
1073         unsigned int mode;
1074         int ret;
1075
1076
1077         if (!clk_dvfs_node)
1078                 return -EINVAL;
1079         
1080         DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n", 
1081                 __func__, __clk_get_name(clk_dvfs_node->clk));
1082
1083         if (!clk_dvfs_node->vd) {
1084                 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n", 
1085                         __func__, clk_dvfs_node->name);
1086                 return -EINVAL;
1087         }
1088         mutex_lock(&clk_dvfs_node->vd->mutex);
1089         if (clk_dvfs_node->enable_count == 0) {
1090                 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1091                         if (clk_dvfs_node->vd->regulator_name)
1092                                 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1093                         if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1094                                 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1095                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1096                                 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1097                                 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1098                                 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1099                         } else {
1100                                 clk_dvfs_node->enable_count = 0;
1101                                 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n", 
1102                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1103                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1104                                 return -ENXIO;
1105                         }
1106                 } else {
1107                         clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1108                 }
1109                 
1110                 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1111                         __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
1112
1113                 dvfs_table_round_clk_rate(clk_dvfs_node);
1114                 dvfs_get_rate_range(clk_dvfs_node);
1115                 clk_dvfs_node->freq_limit_en = 1;
1116                 dvfs_table_round_volt(clk_dvfs_node);
1117                 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1118                 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1119                 
1120                 DVFS_DBG("%s: %s get freq %u!\n", 
1121                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1122
1123                 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1124                         if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1125                                 DVFS_ERR("%s: table empty\n", __func__);
1126                                 clk_dvfs_node->enable_count = 0;
1127                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1128                                 return -EINVAL;
1129                         } else {
1130                                 DVFS_WARNING("%s: clk(%s) freq table values are all smaller than the current rate (%d), keep the current rate and just enable dvfs\n",
1131                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1132                                 clk_dvfs_node->enable_count++;
1133                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1134                                 return 0;
1135                         }
1136                 }
1137                 clk_dvfs_node->enable_count++;
1138                 clk_dvfs_node->set_volt = clk_fv.index;
1139                 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1140                 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1141                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1142 #if 0
1143                 if (clk_dvfs_node->dvfs_nb) {
1144                         // must unregister when clk disable
1145                         clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
1146                 }
1147 #endif
1148                 if(clk_dvfs_node->vd->cur_volt != volt_new) {
1149                         ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1150                         dvfs_volt_up_delay(clk_dvfs_node->vd,volt_new, clk_dvfs_node->vd->cur_volt);
1151                         if (ret < 0) {
1152                                 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1153                                 clk_dvfs_node->enable_count = 0;
1154                                 DVFS_ERR("dvfs enable clk %s,set volt error \n", clk_dvfs_node->name);
1155                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1156                                 return -EAGAIN;
1157                         }
1158                         clk_dvfs_node->vd->cur_volt = volt_new;
1159                         clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1160                 }
1161
1162         } else {
1163                 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1164                         __func__, clk_dvfs_node->enable_count);
1165                 clk_dvfs_node->enable_count++;
1166         }
1167
1168         if (clk_dvfs_node->regu_mode_en) {
1169                 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1170                 if (ret) {
1171                         DVFS_ERR("%s: clk(%s) regu_mode_table is invalid, set regu_mode_en=0!\n",
1172                                         __func__, clk_dvfs_node->name);
1173                         clk_dvfs_node->regu_mode_en = 0;
1174                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1175                         return ret;
1176                 }
1177
1178                 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1179                 if (ret < 0) {
1180                         DVFS_ERR("%s: clk(%s) rate %d kHz get regu_mode fail\n",
1181                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1182                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1183                         return ret;
1184                 } else
1185                         clk_dvfs_node->regu_mode = mode;
1186
1187                 dvfs_update_clk_pds_mode(clk_dvfs_node);
1188         }
1189
1190         mutex_unlock(&clk_dvfs_node->vd->mutex);
1191         
1192         return 0;
1193 }
1194 EXPORT_SYMBOL(clk_enable_dvfs);
1195
1196 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1197 {
1198         int volt_new;
1199
1200         if (!clk_dvfs_node)
1201                 return -EINVAL;
1202
1203         DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n", 
1204                 __func__, __clk_get_name(clk_dvfs_node->clk));
1205
1206         mutex_lock(&clk_dvfs_node->vd->mutex);
1207         if (!clk_dvfs_node->enable_count) {
1208                 DVFS_WARNING("%s:clk(%s) is already closed!\n", 
1209                         __func__, __clk_get_name(clk_dvfs_node->clk));
1210                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1211                 return 0;
1212         } else {
1213                 clk_dvfs_node->enable_count--;
1214                 if (0 == clk_dvfs_node->enable_count) {
1215                         DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1216                                 __func__, __clk_get_name(clk_dvfs_node->clk));
1217                         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1218                         dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1219
1220 #if 0
1221                         clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1222                         DVFS_DBG("clk unregister nb!\n");
1223 #endif
1224                 }
1225         }
1226         mutex_unlock(&clk_dvfs_node->vd->mutex);
1227         return 0;
1228 }
1229 EXPORT_SYMBOL(clk_disable_dvfs);
1230
1231 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1232 {
1233         unsigned long limit_rate;
1234
1235         limit_rate = rate;
1236         if (clk_dvfs_node->freq_limit_en) {
1237                 //dvfs table limit
1238                 if (rate < clk_dvfs_node->min_rate) {
1239                         limit_rate = clk_dvfs_node->min_rate;
1240                 } else if (rate > clk_dvfs_node->max_rate) {
1241                         limit_rate = clk_dvfs_node->max_rate;
1242                 }
1243                 if (temp_limit_enable) {
1244                         if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1245                                 limit_rate = clk_dvfs_node->temp_limit_rate;
1246                         }
1247                 }
1248         }
1249
1250         DVFS_DBG("%s: rate:%ld, limit_rate:%ld,\n", __func__, rate, limit_rate);
1251
1252         return limit_rate;
1253 }
1254
1255 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1256 {
1257         struct cpufreq_frequency_table clk_fv;
1258         unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1259         struct clk *clk = clk_dvfs_node->clk;
1260         int ret;
1261
1262         if (!clk)
1263                 return -EINVAL;
1264
1265         if (!clk_dvfs_node->enable_count){
1266                 DVFS_WARNING("%s: dvfs(%s) is disabled\n",
1267                         __func__, clk_dvfs_node->name);
1268                 return 0;
1269         }
1270         
1271         if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1272                 /* It means the last time set voltage error */
1273                 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1274                 if (ret < 0) {
1275                         return -EAGAIN;
1276                 }
1277         }
1278
1279         rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1280         new_rate = __clk_round_rate(clk, rate);
1281         old_rate = __clk_get_rate(clk);
1282         if (new_rate == old_rate)
1283                 return 0;
1284
1285         DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1286                 __func__, clk_dvfs_node->name, new_rate, old_rate);
1287
1288         /* find the clk corresponding voltage */
1289         ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1290         if (ret) {
1291                 DVFS_ERR("%s: dvfs clk(%s) rate %luHz is not supported\n",
1292                         __func__, clk_dvfs_node->name, new_rate);
1293                 return ret;
1294         }
1295         clk_volt_store = clk_dvfs_node->set_volt;
1296         clk_dvfs_node->set_volt = clk_fv.index;
1297         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1298         DVFS_DBG("%s:%s new rate=%lu(was=%lu),new volt=%lu,(was=%d)\n",
1299                 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new,clk_dvfs_node->vd->cur_volt);
1300
1301
1302         /* if up the rate */
1303         if (new_rate > old_rate) {
1304                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1305                 if (ret)
1306                         DVFS_ERR("%s: dvfs clk(%s) rate %luhz set mode err\n",
1307                                 __func__, clk_dvfs_node->name, new_rate);
1308
1309                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1310                 if (ret)
1311                         goto fail_roll_back;
1312         }
1313
1314         /* scale rate */
1315         if (clk_dvfs_node->clk_dvfs_target) {
1316                 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1317         } else {
1318                 ret = clk_set_rate(clk, rate);
1319         }
1320
1321         if (ret) {
1322                 DVFS_ERR("%s:clk(%s) set rate err\n", 
1323                         __func__, __clk_get_name(clk));
1324                 goto fail_roll_back;
1325         }
1326         clk_dvfs_node->set_freq = new_rate / 1000;
1327
1328         DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n", 
1329                 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1330
1331         /* if down the rate */
1332         if (new_rate < old_rate) {
1333                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1334                 if (ret)
1335                         goto out;
1336
1337                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1338                 if (ret)
1339                         DVFS_ERR("%s:dvfs clk(%s) rate %luhz set mode err\n",
1340                         __func__, clk_dvfs_node->name, new_rate);
1341         }
1342
1343         return 0;
1344 fail_roll_back:
1345         clk_dvfs_node->set_volt = clk_volt_store;
1346 out:
1347         return ret;
1348 }
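/*
 * Note on the ordering above: when raising the rate, the regulator mode and
 * voltage are raised before the clock is switched; when lowering the rate,
 * the clock is switched first and the voltage and mode are dropped
 * afterwards, so the rail always satisfies the higher of the two operating
 * points during the transition.
 */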
1349
1350 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1351 {
1352         return __clk_round_rate(clk_dvfs_node->clk, rate);
1353 }
1354 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1355
1356 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1357 {
1358         return __clk_get_rate(clk_dvfs_node->clk);
1359 }
1360 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1361
1362 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1363 {
1364         unsigned long last_set_rate;
1365
1366         mutex_lock(&clk_dvfs_node->vd->mutex);
1367         last_set_rate = clk_dvfs_node->last_set_rate;
1368         mutex_unlock(&clk_dvfs_node->vd->mutex);
1369
1370         return last_set_rate;
1371 }
1372 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1373
1374
1375 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1376 {
1377         return clk_enable(clk_dvfs_node->clk);
1378 }
1379 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1380
1381 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1382 {
1383         return clk_disable(clk_dvfs_node->clk);
1384 }
1385 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1386
1387 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1388 {
1389         struct vd_node *vd;
1390         struct pd_node *pd;
1391         struct dvfs_node *clk_dvfs_node;
1392
1393         mutex_lock(&rk_dvfs_mutex);
1394         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1395                 mutex_lock(&vd->mutex);
1396                 list_for_each_entry(pd, &vd->pd_list, node) {
1397                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1398                                 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
1399                                         mutex_unlock(&vd->mutex);
1400                                         mutex_unlock(&rk_dvfs_mutex);
1401                                         return clk_dvfs_node;
1402                                 }
1403                         }
1404                 }
1405                 mutex_unlock(&vd->mutex);
1406         }
1407         mutex_unlock(&rk_dvfs_mutex);
1408         
1409         return NULL;    
1410 }
1411 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
1412
1413 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1414 {
1415         return;
1416 }
1417 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1418
1419 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1420 {
1421         return clk_prepare_enable(clk_dvfs_node->clk);
1422 }
1423 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1424
1425
1426 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1427 {
1428         clk_disable_unprepare(clk_dvfs_node->clk);
1429 }
1430 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
1431
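/*
 * Main DVFS entry point for rate changes: delegate to the voltage domain's
 * vd_dvfs_target callback (dvfs_target by default, see of_dvfs_init() below)
 * under the domain mutex, which scales voltage and regulator mode around the
 * actual clk_set_rate().
 */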
1432 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1433 {
1434         int ret = -EINVAL;
1435         
1436         if (!clk_dvfs_node)
1437                 return -EINVAL;
1438         
1439         DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n", 
1440                 __func__, clk_dvfs_node->name, rate);
1441         
1442         #if 0 /* disabled: capability check kept for reference */
1443         if (dvfs_support_clk_set_rate(dvfs_info) == false) {
1444                 DVFS_ERR("dvfs func:%s is not supported!\n", __func__);
1445                 return ret;
1446         }
1447         #endif
1448
1449         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1450                 mutex_lock(&clk_dvfs_node->vd->mutex);
1451                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1452                 clk_dvfs_node->last_set_rate = rate;
1453                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1454         } else {
1455                 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n", 
1456                         __func__, clk_dvfs_node->name);
1457         }
1458                 
1459         return ret;     
1460 }
1461 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
1462
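/*
 * Typical consumer flow (illustrative sketch only; error handling omitted
 * and the 600 MHz figure is arbitrary). dvfs_init() below performs the same
 * lookup for "clk_core":
 *
 *     struct dvfs_node *node = clk_get_dvfs_node("clk_core");
 *
 *     if (node) {
 *             dvfs_clk_prepare_enable(node);
 *             dvfs_clk_set_rate(node, 600 * MHz);
 *     }
 */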
1463
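/*
 * Registration helpers: rk_regist_vd/pd/clk link voltage domains, power
 * domains and clocks into the global rk_dvfs_tree, taking rk_dvfs_mutex or
 * the owning vd mutex as appropriate.
 */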
1464 int rk_regist_vd(struct vd_node *vd)
1465 {
1466         if (!vd)
1467                 return -EINVAL;
1468
1469         vd->mode_flag = 0;
1470         vd->volt_time_flag = 0;
1471         vd->n_voltages = 0;
1472         INIT_LIST_HEAD(&vd->pd_list);
1473         mutex_lock(&rk_dvfs_mutex);
1474         list_add(&vd->node, &rk_dvfs_tree);
1475         mutex_unlock(&rk_dvfs_mutex);
1476
1477         return 0;
1478 }
1479 EXPORT_SYMBOL_GPL(rk_regist_vd);
1480
1481 int rk_regist_pd(struct pd_node *pd)
1482 {
1483         struct vd_node  *vd;
1484
1485         if (!pd)
1486                 return -EINVAL;
1487
1488         vd = pd->vd;
1489         if (!vd)
1490                 return -EINVAL;
1491
1492         INIT_LIST_HEAD(&pd->clk_list);
1493         mutex_lock(&vd->mutex);
1494         list_add(&pd->node, &vd->pd_list);
1495         mutex_unlock(&vd->mutex);
1496         
1497         return 0;
1498 }
1499 EXPORT_SYMBOL_GPL(rk_regist_pd);
1500
1501 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1502 {
1503         struct vd_node  *vd;
1504         struct pd_node  *pd;
1505
1506         if (!clk_dvfs_node)
1507                 return -EINVAL;
1508
1509         vd = clk_dvfs_node->vd;
1510         pd = clk_dvfs_node->pd;
1511         if (!vd || !pd)
1512                 return -EINVAL;
1513
1514         mutex_lock(&vd->mutex);
1515         list_add(&clk_dvfs_node->node, &pd->clk_list);
1516         mutex_unlock(&vd->mutex);
1517         
1518         return 0;
1519 }
1520 EXPORT_SYMBOL_GPL(rk_regist_clk);
1521
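/*
 * After opp_init_cpufreq_table() has produced the frequency table, overwrite
 * each entry's .index with the matching OPP voltage (in uV) so the table
 * doubles as the clock's frequency/voltage map.
 */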
1522 static int rk_convert_cpufreq_table(struct dvfs_node *dvfs_node)
1523 {
1524         struct opp *opp;
1525         struct device *dev;
1526         struct cpufreq_frequency_table *table;
1527         int i;
1528
1529         table = dvfs_node->dvfs_table;
1530         dev = &dvfs_node->dev;
1531
1532         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
1533                 opp = opp_find_freq_exact(dev, table[i].frequency * 1000, true);
1534                 if (IS_ERR(opp))
1535                         return PTR_ERR(opp);
1536                 table[i].index = opp_get_voltage(opp);
1537         }
1538         return 0;
1539 }
1540
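/*
 * Parse a DT property laid out as <temperature frequency> pairs into a
 * CPUFREQ_TABLE_END-terminated cpufreq_frequency_table: .index holds the
 * temperature, .frequency the frequency scaled by 1000. The caller owns the
 * kzalloc'd table.
 */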
1541 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1542 {
1543         struct cpufreq_frequency_table *temp_limit_table = NULL;
1544         const struct property *prop;
1545         const __be32 *val;
1546         int nr, i;
1547
1548         prop = of_find_property(dev_node, propname, NULL);
1549         if (!prop)
1550                 return NULL;
1551         if (!prop->value)
1552                 return NULL;
1553
1554         nr = prop->length / sizeof(u32);
1555         if (nr % 2) {
1556                 pr_err("%s: Invalid freq list\n", __func__);
1557                 return NULL;
1558         }
1559
1560         temp_limit_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1561                              (nr/2 + 1), GFP_KERNEL);
             if (!temp_limit_table)
                     return NULL;
1562
1563         val = prop->value;
1564
1565         for (i = 0; i < nr/2; i++) {
1566                 temp_limit_table[i].index = be32_to_cpup(val++);
1567                 temp_limit_table[i].frequency = be32_to_cpup(val++) * 1000;
1568         }
1569
1570         temp_limit_table[i].index = 0;
1571         temp_limit_table[i].frequency = CPUFREQ_TABLE_END;
1572
1573         return temp_limit_table;
1574
1575 }
1576
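/*
 * Build the DVFS tree from the "dvfs" device tree node: each child becomes a
 * voltage domain, each grandchild a power domain and each great-grandchild a
 * clock. Per-clock OPP, regulator-mode and temperature-limit tables are
 * parsed here as well.
 */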
1577 int of_dvfs_init(void)
1578 {
1579         struct vd_node *vd;
1580         struct pd_node *pd;
1581         struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
1582         struct dvfs_node *dvfs_node;
1583         struct clk *clk;
1584         const __be32 *val;
1585         int ret;
1586
1587         DVFS_DBG("%s\n", __func__);
1588
1589         dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
1590         if (IS_ERR_OR_NULL(dvfs_dev_node)) {
1591                 DVFS_ERR("%s get dvfs dev node err\n", __func__);
1592                 return -ENODEV; /* of_find_node_by_name() returns NULL, not ERR_PTR, on failure */
1593         }
1594
1595         val = of_get_property(dvfs_dev_node, "target-temp", NULL);
1596         if (val) {
1597                 target_temp = be32_to_cpup(val);
1598         }
1599
1600         val = of_get_property(dvfs_dev_node, "temp-limit-enable", NULL);
1601         if (val) {
1602                 temp_limit_enable = be32_to_cpup(val);
1603         }
1604
1605         for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
1606                 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
1607                 if (!vd)
1608                         return -ENOMEM;
1609
1610                 mutex_init(&vd->mutex);
1611                 vd->name = vd_dev_node->name;
1612                 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
1613                 if (ret) {
1614                         DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n", 
1615                                 __func__, vd_dev_node->name, ret);
1616                         kfree(vd);
1617                         continue;
1618                 }
1619                 
1620                 vd->suspend_volt = 0;
1621                 
1622                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1623                 vd->vd_dvfs_target = dvfs_target;
1624                 ret = rk_regist_vd(vd);
1625                 if (ret){
1626                         DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
1627                         kfree(vd);
1628                         continue;
1629                 }
1630
1631                 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n", 
1632                         __func__, vd->name, vd->regulator_name, vd->suspend_volt);
1633                 
1634                 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {            
1635                         pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
1636                         if (!pd)
1637                                 return -ENOMEM;
1638
1639                         pd->vd = vd;
1640                         pd->name = pd_dev_node->name;
1641                         
1642                         ret = rk_regist_pd(pd);
1643                         if (ret){
1644                                 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
1645                                 kfree(pd);
1646                                 continue;
1647                         }
1648                         DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n", 
1649                                 __func__, pd->name, vd->name);                  
1650                         for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
1651                                 if (!of_device_is_available(clk_dev_node))
1652                                         continue;
1653                                 
1654                                 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
1655                                 if (!dvfs_node)
1656                                         return -ENOMEM;
1657                                 
1658                                 dvfs_node->name = clk_dev_node->name;
1659                                 dvfs_node->pd = pd;
1660                                 dvfs_node->vd = vd;
1661
1662                                 val = of_get_property(clk_dev_node, "regu-mode-en", NULL);
1663                                 if (val)
1664                                         dvfs_node->regu_mode_en = be32_to_cpup(val);
1665                                 if (dvfs_node->regu_mode_en)
1666                                         dvfs_node->regu_mode_table = of_get_regu_mode_table(clk_dev_node);
1667                                 else
1668                                         dvfs_node->regu_mode_table = NULL;
1669
1670                                 if (temp_limit_enable) {
1671                                         val = of_get_property(clk_dev_node, "temp-channel", NULL);
1672                                         if (val) {
1673                                                 dvfs_node->temp_channel = be32_to_cpup(val);
1674                                         }
1675                                         dvfs_node->nor_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "normal-temp-limit");
1676                                         dvfs_node->per_temp_limit_table = of_get_temp_limit_table(clk_dev_node, "performance-temp-limit");
1677                                 }
1678                                 dvfs_node->temp_limit_rate = -1;
1679                                 dvfs_node->dev.of_node = clk_dev_node;
1680                                 ret = of_init_opp_table(&dvfs_node->dev);
1681                                 if (ret) {
1682                                         DVFS_ERR("%s:clk(%s) get opp table err:%d\n", __func__, dvfs_node->name, ret);
1683                                         kfree(dvfs_node);
1684                                         continue;
1685                                 }
1686                                 
1687                                 ret = opp_init_cpufreq_table(&dvfs_node->dev, &dvfs_node->dvfs_table);
1688                                 if (ret) {
1689                                         DVFS_ERR("%s:clk(%s) get cpufreq table err:%d\n", __func__, dvfs_node->name, ret);
1690                                         kfree(dvfs_node);
1691                                         continue;
1692                                 }
1693                                 ret = rk_convert_cpufreq_table(dvfs_node);
1694                                 if (ret) {
1695                                         kfree(dvfs_node);
1696                                         continue;
1697                                 }
1698                                 
1699                                 clk = clk_get(NULL, clk_dev_node->name);
1700                                 if (IS_ERR(clk)){
1701                                         DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
1702                                         kfree(dvfs_node);
1703                                         continue;
1704                                         
1705                                 }
1706                                 
1707                                 dvfs_node->clk = clk;
1708                                 ret = rk_regist_clk(dvfs_node);
1709                                 if (ret){
1710                                         DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
1711                                         return ret;
1712                                 }
1713
1714                                 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n", 
1715                                         __func__, clk_dev_node->name, pd->name);        
1716
1717                         }
1718                 }       
1719         }
1720         return 0;
1721 }
1722
1723 /*********************************************************************************/
1724 /**
1725  * dump_dbg_map(): dump the whole DVFS tree (voltage domains, power domains,
 *                  clocks and their freq/volt tables) to the kernel log for debugging.
 *                  Output goes through printk(), so the sysfs read itself returns 0 bytes.
1726  */
1727 static int dump_dbg_map(char *buf)
1728 {
1729         int i;
1730         struct vd_node  *vd;
1731         struct pd_node  *pd;
1732         struct dvfs_node        *clk_dvfs_node;
1733         char *s = buf;
1734         
1735         mutex_lock(&rk_dvfs_mutex);
1736         printk( "-------------DVFS TREE-----------\n\n\n");
1737         printk( "DVFS TREE:\n");
1738
1739         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1740                 mutex_lock(&vd->mutex);
1741                 printk( "|\n|- voltage domain:%s\n", vd->name);
1742                 printk( "|- current voltage:%d\n", vd->cur_volt);
1743                 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
1744
1745                 list_for_each_entry(pd, &vd->pd_list, node) {
1746                         printk( "|  |\n|  |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
1747                                         pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
1748                                         dvfs_regu_mode_to_string(pd->regu_mode));
1749
1750                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1751                                 printk( "|  |  |\n|  |  |- clock: %s current: rate %d, volt = %d,"
1752                                                 " enable_dvfs = %s\n",
1753                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
1754                                                 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
1755                                 printk( "|  |  |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
1756                                                 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
1757                                                 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
1758                                                 clk_dvfs_node->last_set_rate/1000);
1759                                 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1760                                         printk( "|  |  |  |- freq = %d, volt = %d\n",
1761                                                         clk_dvfs_node->dvfs_table[i].frequency,
1762                                                         clk_dvfs_node->dvfs_table[i].index);
1763
1764                                 }
1765                                 printk( "|  |  |- clock: %s current: rate %d, regu_mode = %s,"
1766                                                 " regu_mode_en = %d\n",
1767                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq,
1768                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
1769                                                 clk_dvfs_node->regu_mode_en);
1770                                 if (clk_dvfs_node->regu_mode_table) {
1771                                         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1772                                                 printk( "|  |  |  |- freq = %d, regu_mode = %s\n",
1773                                                                 clk_dvfs_node->regu_mode_table[i].frequency/1000,
1774                                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
1775                                         }
1776                                 }
1777                         }
1778                 }
1779                 mutex_unlock(&vd->mutex);
1780         }
1781         
1782         printk( "-------------DVFS TREE END------------\n");
1783         mutex_unlock(&rk_dvfs_mutex);
1784         
1785         return s - buf;
1786 }
1787
1788 /*********************************************************************************/
1789 static struct kobject *dvfs_kobj;
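/*
 * dvfs_attribute mirrors the layout of struct kobj_attribute (struct
 * attribute first, then show/store with kobj_attribute prototypes). dvfs_kobj
 * is created with kobject_create_and_add(), whose default sysfs ops dispatch
 * through struct kobj_attribute, so the two layouts must stay in sync.
 */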
1790 struct dvfs_attribute {
1791         struct attribute        attr;
1792         ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
1793                         char *buf);
1794         ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
1795                         const char *buf, size_t n);
1796 };
1797
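/* Reading "dvfs_tree" dumps the tree via dump_dbg_map(); writes are accepted but ignored. */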
1798 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
1799                const char *buf, size_t n)
1800 {
1801        return n;
1802 }
1803 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
1804                char *buf)
1805 {
1806        return dump_dbg_map(buf);
1807 }
1808
1809
1810 static struct dvfs_attribute dvfs_attrs[] = {
1811         /*     node_name        permision               show_func       store_func */
1812 //#ifdef CONFIG_RK_CLOCK_PROC
1813         __ATTR(dvfs_tree,       S_IRUSR | S_IRGRP | S_IWUSR,    dvfs_tree_show, dvfs_tree_store),
1814 //#endif
1815 };
1816
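/*
 * late_initcall: expose /sys/dvfs/dvfs_tree, start the temperature-limit
 * worker on the cpu clock when temp_limit_enable is set, and tie the vdd_gpu
 * regulator to the pd_gpu power-domain, framebuffer-blank and reboot
 * notifiers.
 */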
1817 static int __init dvfs_init(void)
1818 {
1819         int i, ret = 0;
1820
1821         dvfs_kobj = kobject_create_and_add("dvfs", NULL);
1822         if (!dvfs_kobj)
1823                 return -ENOMEM;
1824         for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
1825                 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
1826                 if (ret != 0) {
1827                         DVFS_ERR("create index %d error\n", i);
1828                         return ret;
1829                 }
1830         }
1831
1832         if (temp_limit_enable) {
1833                 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
1834                 if (!clk_cpu_dvfs_node){
1835                         return -EINVAL;
1836                 }
1837
1838                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1839                 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
1840                 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
1841         }
1842
1843         vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
1844         if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
1845                 struct clk *clk = clk_get(NULL, "pd_gpu");
1846
1847                 if (!IS_ERR_OR_NULL(clk))
1848                         rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
1849
1850                 fb_register_client(&early_suspend_notifier);
1851                 register_reboot_notifier(&vdd_gpu_reboot_notifier);
1852         }
1853
1854         return ret;
1855 }
1856
1857 late_initcall(dvfs_init);