/* arch/arm/mach-rockchip/dvfs.c
2  *
3  * Copyright (C) 2012 ROCKCHIP, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
20 #include <linux/of.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
24 #include <linux/fb.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
30 #include "efuse.h"
31
32 extern int rockchip_tsadc_get_temp(int chn);
33
34 #define MHz     (1000 * 1000)
35 static LIST_HEAD(rk_dvfs_tree);
36 static DEFINE_MUTEX(rk_dvfs_mutex);
37 static struct workqueue_struct *dvfs_wq;
38 static struct dvfs_node *clk_cpu_dvfs_node;
39 static unsigned int target_temp = 80;
40 static int temp_limit_enable;
41
42 static int pd_gpu_off, early_suspend;
43 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
44 struct regulator *vdd_gpu_regulator;
45
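/*
 * Reboot notifier: force the GPU rail back on before the machine restarts,
 * presumably so the reboot never begins with vdd_gpu gated by the
 * early-suspend/power-domain handling below.
 */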
46 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
47         unsigned long event, void *ptr)
48 {
        int ret = 0;

        DVFS_DBG("%s: enable vdd_gpu\n", __func__);
        mutex_lock(&switch_vdd_gpu_mutex);
        if (!regulator_is_enabled(vdd_gpu_regulator))
                ret = regulator_enable(vdd_gpu_regulator);
        if (ret)
                DVFS_ERR("%s: enable vdd_gpu failed (%d)\n", __func__, ret);
        mutex_unlock(&switch_vdd_gpu_mutex);
56
57         return NOTIFY_OK;
58 }
59
60 static struct notifier_block vdd_gpu_reboot_notifier = {
61         .notifier_call = vdd_gpu_reboot_notifier_event,
62 };
63
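/*
 * Power-domain notifier: while the system is in early suspend, keep the
 * vdd_gpu regulator in step with the GPU power domain - enable it on
 * RK_CLK_PD_PREPARE and disable it on RK_CLK_PD_UNPREPARE.
 */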
64 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
65         unsigned long event, void *ptr)
66 {
67         int ret;
68
69         switch (event) {
70         case RK_CLK_PD_PREPARE:
71                 mutex_lock(&switch_vdd_gpu_mutex);
72                 pd_gpu_off = 0;
73                 if (early_suspend) {
74                         if (!regulator_is_enabled(vdd_gpu_regulator))
75                                 ret = regulator_enable(vdd_gpu_regulator);
76                 }
77                 mutex_unlock(&switch_vdd_gpu_mutex);
78                 break;
79         case RK_CLK_PD_UNPREPARE:
80                 mutex_lock(&switch_vdd_gpu_mutex);
81                 pd_gpu_off = 1;
82                 if (early_suspend) {
83                         if (regulator_is_enabled(vdd_gpu_regulator))
84                                 ret = regulator_disable(vdd_gpu_regulator);
85                 }
86                 mutex_unlock(&switch_vdd_gpu_mutex);
87                 break;
88         default:
89                 break;
90         }
91
92         return NOTIFY_OK;
93 }
94
95 static struct notifier_block clk_pd_gpu_notifier = {
96         .notifier_call = clk_pd_gpu_notifier_call,
97 };
98
99
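/*
 * Framebuffer blank notifier: track the early-suspend state.  On unblank
 * the GPU rail is re-enabled if the power domain had switched it off; on
 * powerdown blank it is disabled again while pd_gpu is off.
 */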
100 static int early_suspend_notifier_call(struct notifier_block *self,
101                                 unsigned long action, void *data)
102 {
103         struct fb_event *event = data;
104         int blank_mode = *((int *)event->data);
105         int ret;
106
107         mutex_lock(&switch_vdd_gpu_mutex);
108         if (action == FB_EARLY_EVENT_BLANK) {
109                 switch (blank_mode) {
110                 case FB_BLANK_UNBLANK:
111                         early_suspend = 0;
112                         if (pd_gpu_off) {
113                                 if (!regulator_is_enabled(vdd_gpu_regulator))
114                                         ret = regulator_enable(
115                                         vdd_gpu_regulator);
116                         }
117                         break;
118                 default:
119                         break;
120                 }
121         } else if (action == FB_EVENT_BLANK) {
122                 switch (blank_mode) {
123                 case FB_BLANK_POWERDOWN:
124                         early_suspend = 1;
125                         if (pd_gpu_off) {
126                                 if (regulator_is_enabled(vdd_gpu_regulator))
127                                         ret = regulator_disable(
128                                         vdd_gpu_regulator);
129                         }
130
131                         break;
132                 default:
133                         break;
134                 }
135         }
136         mutex_unlock(&switch_vdd_gpu_mutex);
137
138         return NOTIFY_OK;
139 }
140
141 static struct notifier_block early_suspend_notifier = {
142                 .notifier_call = early_suspend_notifier_call,
143 };
144
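/*
 * Regulator operating modes as encoded in the DT "regu-mode-table"
 * property; dvfs_regu_mode_convert() maps them onto the kernel's
 * REGULATOR_MODE_* values.
 */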
145 #define DVFS_REGULATOR_MODE_STANDBY     1
146 #define DVFS_REGULATOR_MODE_IDLE        2
147 #define DVFS_REGULATOR_MODE_NORMAL      3
148 #define DVFS_REGULATOR_MODE_FAST        4
149
static const char *dvfs_regu_mode_to_string(unsigned int mode)
151 {
152         switch (mode) {
153         case DVFS_REGULATOR_MODE_FAST:
154                 return "FAST";
155         case DVFS_REGULATOR_MODE_NORMAL:
156                 return "NORMAL";
157         case DVFS_REGULATOR_MODE_IDLE:
158                 return "IDLE";
159         case DVFS_REGULATOR_MODE_STANDBY:
160                 return "STANDBY";
161         default:
162                 return "UNKNOWN";
163         }
164 }
165
166 static int dvfs_regu_mode_convert(unsigned int mode)
167 {
168         switch (mode) {
169         case DVFS_REGULATOR_MODE_FAST:
170                 return REGULATOR_MODE_FAST;
171         case DVFS_REGULATOR_MODE_NORMAL:
172                 return REGULATOR_MODE_NORMAL;
173         case DVFS_REGULATOR_MODE_IDLE:
174                 return REGULATOR_MODE_IDLE;
175         case DVFS_REGULATOR_MODE_STANDBY:
176                 return REGULATOR_MODE_STANDBY;
177         default:
178                 return -EINVAL;
179         }
180 }
181
182 static int dvfs_regu_mode_deconvert(unsigned int mode)
183 {
184         switch (mode) {
185         case REGULATOR_MODE_FAST:
186                 return DVFS_REGULATOR_MODE_FAST;
187         case REGULATOR_MODE_NORMAL:
188                 return DVFS_REGULATOR_MODE_NORMAL;
189         case REGULATOR_MODE_IDLE:
190                 return DVFS_REGULATOR_MODE_IDLE;
191         case REGULATOR_MODE_STANDBY:
192                 return DVFS_REGULATOR_MODE_STANDBY;
193         default:
194                 return -EINVAL;
195         }
196 }
197
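/*
 * Parse the optional "regu-mode-table" property into a
 * cpufreq_frequency_table: .frequency holds the threshold rate (the DT
 * value in kHz multiplied by 1000) and .index holds the DVFS regulator
 * mode.  Entries are <freq-kHz mode> pairs, highest threshold first, and
 * the last pair must use frequency 0 as the catch-all.
 *
 * Purely illustrative sketch of such a property (values are made up, not
 * taken from any real dts):
 *
 *     regu-mode-table = <
 *             816000 4        // >= 816 MHz: FAST
 *                  0 3        // otherwise : NORMAL
 *     >;
 */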
198 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
199 {
200         struct cpufreq_frequency_table *regu_mode_table = NULL;
201         const struct property *prop;
202         const __be32 *val;
203         int nr, i;
204
205         prop = of_find_property(dev_node, "regu-mode-table", NULL);
206         if (!prop)
207                 return NULL;
208         if (!prop->value)
209                 return NULL;
210
211         nr = prop->length / sizeof(u32);
212         if (nr % 2) {
213                 pr_err("%s: Invalid freq list\n", __func__);
214                 return NULL;
215         }
216
217         regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
218                              (nr/2+1), GFP_KERNEL);
219         if (!regu_mode_table) {
220                 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
221                 return ERR_PTR(-ENOMEM);
222         }
223
224         val = prop->value;
225
226         for (i=0; i<nr/2; i++){
227                 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
228                 regu_mode_table[i].index = be32_to_cpup(val++);
229         }
230
231         if (regu_mode_table[i-1].frequency != 0) {
232                 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
233                 kfree(regu_mode_table);
234                 return NULL;
235         }
236
237         regu_mode_table[i].index = 0;
238         regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
239
240         return regu_mode_table;
241 }
242
243 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
244 {
245         int i, ret;
246         int mode, convert_mode, valid_mode;
247
248         if (!clk_dvfs_node)
249                 return -EINVAL;
250
251         if (!clk_dvfs_node->regu_mode_table)
252                 return -EINVAL;
253
254         if (!clk_dvfs_node->vd)
255                 return -EINVAL;
256
257         if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
258                 return -EINVAL;
259
260         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
261                 mode = clk_dvfs_node->regu_mode_table[i].index;
262                 convert_mode = dvfs_regu_mode_convert(mode);
263
264                 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
265                                                 &convert_mode);
266                 if (ret) {
267                         DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
268                                 mode);
269                         kfree(clk_dvfs_node->regu_mode_table);
270                         clk_dvfs_node->regu_mode_table = NULL;
271                         return ret;
272                 }
273
274                 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
275                 if (valid_mode != mode) {
276                         DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
277                                 __func__, mode, valid_mode);
278                         clk_dvfs_node->regu_mode_table[i].index = valid_mode;
279                 }
280
281         }
282
283         return 0;
284 }
285
286 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
287         unsigned long rate, unsigned int *mode)
288 {
289         int i;
290
291
292         if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
293                 return -EINVAL;
294
295         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
296                 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
297                         *mode = clk_dvfs_node->regu_mode_table[i].index;
298                         return 0;
299                 }
300         }
301
302         return -EINVAL;
303 }
304
305 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
306 {
307         unsigned int mode_max = 0;
308
309
310         if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
311                 return clk_dvfs_node->regu_mode;
312         }
313
314         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
315                 if (clk_dvfs_node->regu_mode_en)
316                         mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
317         }
318
319         return mode_max;
320 }
321
322 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
323 {
324         struct pd_node *pd;
325
326         if (!clk_dvfs_node)
327                 return;
328
329         pd = clk_dvfs_node->pd;
330         if (!pd)
331                 return;
332
333         pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
334 }
335
336 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
337 {
338         unsigned int mode_max_vd = 0;
339         struct pd_node *pd;
340
341         if (!vd)
342                 return -EINVAL;
343
344         list_for_each_entry(pd, &vd->pd_list, node) {
345                 mode_max_vd = max(mode_max_vd, pd->regu_mode);
346         }
347
348         return mode_max_vd;
349 }
350
351 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
352 {
353         if (!clk_dvfs_node)
354                 return -EINVAL;
355
356         dvfs_update_clk_pds_mode(clk_dvfs_node);
357
358         return  dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
359 }
360
361 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
362 {
363         int convert_mode;
364         int ret = 0;
365
366
367         if (IS_ERR_OR_NULL(vd)) {
368                 DVFS_ERR("%s: vd_node error\n", __func__);
369                 return -EINVAL;
370         }
371
372         DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
373
374         convert_mode = dvfs_regu_mode_convert(mode);
375         if (convert_mode < 0) {
376                 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
377                 return convert_mode;
378         }
379
380         if (!IS_ERR_OR_NULL(vd->regulator)) {
381                 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
382                 if (ret < 0) {
383                         DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
384                                 vd->regulator_name, mode, vd->regu_mode);
385                         return -EAGAIN;
386                 }
387         } else {
388                 DVFS_ERR("%s: invalid regulator\n", __func__);
389                 return -EINVAL;
390         }
391
392         vd->regu_mode = mode;
393
394         return 0;
395 }
396
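/*
 * Pick the regulator mode for the given target rate: look it up in this
 * clk's regu_mode_table, propagate the per-clk mode up through its power
 * domain and voltage domain, and finally program the highest mode
 * required by any active clk on that regulator.
 */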
397 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
398 {
399         int ret;
400         int mode;
401
402
403         if (!clk_dvfs_node)
404                 return -EINVAL;
405
406         if (!clk_dvfs_node->regu_mode_en)
407                 return 0;
408
409         ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
410         if (ret) {
411                 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
412                         __func__, clk_dvfs_node->name, rate);
413                 return ret;
414         }
415         clk_dvfs_node->regu_mode = mode;
416
417         mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
418         if (mode < 0)
419                 return mode;
420
421         ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
422
423         return ret;
424 }
425
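/*
 * Wait for the regulator output to ramp after a voltage increase.  The
 * delay comes from regulator_set_voltage_time() when available; otherwise
 * it falls back to (delta_uV >> 9), i.e. roughly delta/512 us, which
 * assumes a slew rate on the order of 0.5 mV/us.
 */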
426 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
427 {
428         int u_time;
429         
        if (new_volt <= old_volt)
                return;
        if (vd->volt_time_flag > 0)
                u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
        else
                u_time = -1;
        if (u_time < 0) { /* regulator cannot report the ramp time, use a default estimate */
                DVFS_DBG("%s: vd %s does not support querying the delay time, using default\n",
                                __func__, vd->name);
                u_time = (new_volt - old_volt) >> 9;
        }
441         
442         DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n", 
443                 __func__, vd->name, old_volt, new_volt, u_time);
444         
445         if (u_time >= 1000) {
446                 mdelay(u_time / 1000);
447                 udelay(u_time % 1000);
                DVFS_WARNING("%s: regulator voltage ramp delay is larger than 1ms, old is %d, new is %d\n",
                        __func__, old_volt, new_volt);
450         } else if (u_time) {
451                 udelay(u_time);
452         }                       
453 }
454
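/*
 * Set the regulator voltage and, if the call reports an error, read the
 * voltage back: if the new value actually took effect despite the error,
 * the failure is ignored.
 */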
455 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
456 {
457         int ret = 0, read_back = 0;
458         
459         ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
460         if (ret < 0) {
                DVFS_ERR("%s: set voltage failed, read back to check whether it took effect\n", __func__);
462
463                 /* read back to judge if it is already effect */
464                 mdelay(2);
465                 read_back = dvfs_regulator_get_voltage(regulator);
466                 if (read_back == max_uV) {
467                         DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
468                         ret = 0;
469                 } else {
470                         DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
471                 }
472         }
473         
474         return ret;
475 }
476
477 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
478 {
479         int ret = 0;
480         
481         DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
482         
483         if (IS_ERR_OR_NULL(vd_clk)) {
484                 DVFS_ERR("%s: vd_node error\n", __func__);
485                 return -EINVAL;
486         }
487
488         if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
489                 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
490                 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
491                 if (ret < 0) {
492                         vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
493                         DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
494                                         __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
495                         return -EAGAIN;
496                 }
497
498         } else {
499                 DVFS_ERR("%s: invalid regulator\n", __func__);
500                 return -EINVAL;
501         }
502
503         vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
504         vd_clk->cur_volt = volt_new;
505
506         return 0;
507
508 }
509
510 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
511 {
512         int flag_set_volt_correct = 0;
513         if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
514                 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
515         else {
516                 DVFS_ERR("%s: invalid regulator\n", __func__);
517                 return -EINVAL;
518         }
        if (flag_set_volt_correct <= 0) {
                DVFS_ERR("%s (vd:%s): tried to reload volt but read-back failed again (%d), stop scaling\n",
                                __func__, dvfs_vd->name, flag_set_volt_correct);
                return -EAGAIN;
        }
        dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
        DVFS_WARNING("%s: vd(%s) reloaded volt = %d\n",
                        __func__, dvfs_vd->name, flag_set_volt_correct);
527
528         /* Reset vd's voltage */
529         dvfs_vd->cur_volt = flag_set_volt_correct;
530
531         return dvfs_vd->cur_volt;
532 }
533
534
/* When a clk is enabled, read back the current voltage of its vd regulator. */
static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
{
        vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
        if (vd->cur_volt <= 0) {
                vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
                return;
        }
        vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
}
544
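/*
 * Cache the regulator's discrete voltage steps in vd->volt_list (at most
 * VD_VOL_LIST_CNT entries) so later table rounding and PVTM scanning do
 * not have to query the regulator framework each time.
 */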
545 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
546 {
547         unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
548         int n = 0, sel_volt = 0;
549         
550         if(selector > VD_VOL_LIST_CNT)
551                 selector = VD_VOL_LIST_CNT;
552
553         for (i = 0; i < selector; i++) {
554                 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
555                 if(sel_volt <= 0){      
556                         //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
557                         //      __func__, vd->name, i, sel_volt);
558                         continue;
559                 }
560                 vd->volt_list[n++] = sel_volt;  
561                 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n", 
562                         __func__, vd->name, i, n, sel_volt);
563         }
564         
565         vd->n_voltages = n;
566 }
567
/* return the smallest listed voltage >= volt (round up) */
569 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
570 {
571         int sel_volt;
572         int i;
573         
574         for (i = 0; i < vd->n_voltages; i++) {
575                 sel_volt = vd->volt_list[i];
576                 if(sel_volt <= 0){      
577                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
578                                 __func__, i);
579                         continue;
580                 }
581                 if(sel_volt >= volt)
582                         return sel_volt;        
583         }
584         return -EINVAL;
585 }
586
/* return the largest listed voltage that does not exceed volt (round down) */
588 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
589 {
590         int sel_volt;
591         int i;
592         
593         for (i = 0; i < vd->n_voltages; i++) {
594                 sel_volt = vd->volt_list[i];
595                 if(sel_volt <= 0){      
596                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
597                                 __func__, i);
598                         continue;
599                 }
600                 if(sel_volt > volt){
601                         if(i > 0)
602                                 return vd->volt_list[i-1];
603                         else
604                                 return -EINVAL;
605                 }       
606         }
607         
608         return -EINVAL;
609 }
610
/* round volt to a listed value: down (<= volt) for VD_LIST_RELATION_L, otherwise up (>= volt) */
612 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
613 {
614         if(!vd->n_voltages)
615                 return -EINVAL;
616         if(flags == VD_LIST_RELATION_L)
617                 return vd_regulator_round_volt_min(vd, volt);
618         else
619                 return vd_regulator_round_volt_max(vd, volt);   
620 }
621
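/*
 * Round every voltage in the dvfs table (stored in .index, in uV) up to
 * the nearest step actually supported by the vd's regulator.
 */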
622 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
623 {
624         int i, test_volt;
625
626         if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd || 
627                 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
628                 return;
629
630         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
631
632                 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
633                 if(test_volt <= 0)
634                 {       
635                         DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
636                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
637                         break;
638                 }
639                 DVFS_DBG("clk %s:round_volt %d to %d\n",
640                         clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
641                 
642                 clk_dvfs_node->dvfs_table[i].index=test_volt;           
643         }
644 }
645
646 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
647 {
        if (vd->volt_time_flag <= 0) { /* check whether the regulator reports the voltage ramp time */
                vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
                if (vd->volt_time_flag < 0) {
                        DVFS_DBG("%s: vd %s does not support reporting volt_time\n",
                                __func__, vd->name);
                } else {
                        DVFS_DBG("%s: vd %s supports volt_time, a 200mV step needs a %d us delay\n",
                                __func__, vd->name, vd->volt_time_flag);
                }
        }
659 }
660 #if 0
661 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
662 {
663         //REGULATOR_MODE_FAST
664         if(vd->mode_flag <= 0){// check regulator support get uping vol timer{
665                 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
666                 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
667                         || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
668                         
669                         if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
670                                 vd->mode_flag = 0;// check again
671                         }
672                 }
673                 if(vd->mode_flag > 0){
674                         DVFS_DBG("%s,vd %s mode(now is %d) support\n",
675                                 __func__, vd->name, vd->mode_flag);
676                 }
677                 else{
678                         DVFS_DBG("%s,vd %s mode is not support now check\n",
679                                 __func__, vd->name);
680                 }
681         }
682 }
683 #endif
684
685 struct regulator *dvfs_get_regulator(char *regulator_name) 
686 {
687         struct vd_node *vd;
688
689         mutex_lock(&rk_dvfs_mutex);
690         list_for_each_entry(vd, &rk_dvfs_tree, node) {
691                 if (strcmp(regulator_name, vd->regulator_name) == 0) {
692                         mutex_unlock(&rk_dvfs_mutex);
693                         return vd->regulator;
694                 }
695         }
696         mutex_unlock(&rk_dvfs_mutex);
697         return NULL;
698 }
699
700 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
701 {
702         struct cpufreq_frequency_table *table;
703         int i = 0;
704
705         if (!clk_dvfs_node)
706                 return -EINVAL;
707
708         clk_dvfs_node->min_rate = 0;
709         clk_dvfs_node->max_rate = 0;
710
711         table = clk_dvfs_node->dvfs_table;
712         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
713                 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
714                 if (i == 0)
715                         clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
716         }
717
718         DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
719                         __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
720
721         return 0;
722 }
723
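/*
 * Round the table frequencies to rates the clk can really produce.  The
 * kHz value may carry DDR flags in its lowest three decimal digits, so
 * the flags are split off, the remaining rate is rounded up to a whole
 * MHz via __clk_round_rate(), and the flags are added back afterwards.
 */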
724 static void dvfs_table_round_clk_rate(struct dvfs_node  *clk_dvfs_node)
725 {
726         int i, rate, temp_rate, flags;
727         
728         if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
729                 return;
730
731         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
732                 //ddr rate = real rate+flags
733                 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
734                 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
735                 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
736                 if(temp_rate <= 0){     
737                         DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
738                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
739                         continue;
740                 }
741                 
742                 /* Set rate unit as MHZ */
743                 if (temp_rate % MHz != 0)
744                         temp_rate = (temp_rate / MHz + 1) * MHz;
745
746                 temp_rate = (temp_rate / 1000) + flags;
747                 
748                 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
749                         clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
750                 
751                 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;             
752         }
753 }
754
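/*
 * Look up the table entry for a requested rate (in kHz): the first entry
 * whose frequency is >= rate_khz wins, and its .index supplies the
 * reference voltage.  Returns -EINVAL if the rate is above the table.
 */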
755 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
756                 struct cpufreq_frequency_table *clk_fv)
757 {
758         int i = 0;
759         
760         if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
                /* invalid arguments, nothing to look up */
762                 return -EINVAL;
763         }
764         clk_fv->frequency = rate_khz;
765         clk_fv->index = 0;
766
767         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
768                 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
769                         clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
770                         clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
771                          //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
772                          //clk_fv->frequency, clk_fv->index);
773                         return 0;
774                 }
775         }
776         clk_fv->frequency = 0;
777         clk_fv->index = 0;
778         //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
779         return -EINVAL;
780 }
781
782 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
783 {
784         int volt_max = 0;
785
786         if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
787                 return clk_dvfs_node->set_volt;
788         }
789
790         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
791                 if (clk_dvfs_node->enable_count)
792                         volt_max = max(volt_max, clk_dvfs_node->set_volt);
793         }
794         return volt_max;
795 }
796
797 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
798 {
799         struct pd_node *pd;
800         
801         if (!clk_dvfs_node)
802                 return;
803         
804         pd = clk_dvfs_node->pd;
805         if (!pd)
806                 return;
807         
808         pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
809 }
810
811 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
812 {
813         int volt_max_vd = 0;
814         struct pd_node *pd;
815
816         if (!vd)
817                 return -EINVAL;
818         
819         list_for_each_entry(pd, &vd->pd_list, node) {
820                 volt_max_vd = max(volt_max_vd, pd->cur_volt);
821         }
822
823         return volt_max_vd;
824 }
825
826 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
827 {
828         if (!clk_dvfs_node)
829                 return -EINVAL;
830
831         dvfs_update_clk_pds_volt(clk_dvfs_node);
832         return  dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
833 }
834
835 #if 0
836 static void dvfs_temp_limit_work_func(struct work_struct *work)
837 {
838         unsigned long delay = HZ / 10; // 100ms
839         struct vd_node *vd;
840         struct pd_node *pd;
841         struct dvfs_node *clk_dvfs_node;
842
843         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
844
845         mutex_lock(&rk_dvfs_mutex);
846         list_for_each_entry(vd, &rk_dvfs_tree, node) {
847                 mutex_lock(&vd->mutex);
848                 list_for_each_entry(pd, &vd->pd_list, node) {
849                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
850                                 if (clk_dvfs_node->temp_limit_table) {
851                                         clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
852                                         clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
853                                 }
854                         }
855                 }
856                 mutex_unlock(&vd->mutex);
857         }
858         mutex_unlock(&rk_dvfs_mutex);
859 }
860 #endif
861
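/*
 * Per-process-version PVTM calibration tables for the RK3288 ARM domain.
 * For each frequency, .index is the minimum PVTM count the silicon must
 * reach at that frequency; pvtm_set_dvfs_table() combines it with the
 * temperature and margin factors below to pick per-chip voltages.
 */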
862 static struct cpufreq_frequency_table rk3288v0_arm_pvtm_table[] = {
863         {.frequency = 216000,  .index = 4006},
864         {.frequency = 408000,  .index = 6518},
865         {.frequency = 600000,  .index = 8345},
866         {.frequency = 816000,  .index = 11026},
867         {.frequency = 1008000,  .index = 12906},
868         {.frequency = 1200000,  .index = 15532},
869         {.frequency = 1416000,  .index = 18076},
870         {.frequency = 1608000,  .index = 21282},
871         {.frequency = CPUFREQ_TABLE_END, .index = 1},
872 };
873
874 static struct pvtm_info rk3288v0_arm_pvtm_info = {
875         .compatible = "rockchip,rk3288",
876         .pvtm_table = rk3288v0_arm_pvtm_table,
877         .channel = ARM_DVFS_CH,
878         .process_version = RK3288_PROCESS_V0,
879         .scan_rate_hz = 216000000,
880         .sample_time_us = 1000,
881         .volt_step_uv = 12500,
882         .delta_pvtm_by_volt = 400,
883         .delta_pvtm_by_temp = 14,
884         .volt_margin_uv = 25000,
885         .min_volt_uv = 850000,
886         .max_volt_uv = 1400000,
887 };
888
889 static struct cpufreq_frequency_table rk3288v1_arm_pvtm_table[] = {
890         {.frequency = 216000,  .index = 4710},
891         {.frequency = 408000,  .index = 7200},
892         {.frequency = 600000,  .index = 9192},
893         {.frequency = 816000,  .index = 12560},
894         {.frequency = 1008000,  .index = 14741},
895         {.frequency = 1200000,  .index = 16886},
896         {.frequency = 1416000,  .index = 20081},
897         {.frequency = 1608000,  .index = 24061},
898         {.frequency = CPUFREQ_TABLE_END, .index = 1},
899 };
900
901 static struct pvtm_info rk3288v1_arm_pvtm_info = {
902         .compatible = "rockchip,rk3288",
903         .pvtm_table = rk3288v1_arm_pvtm_table,
904         .channel = ARM_DVFS_CH,
905         .process_version = RK3288_PROCESS_V1,
906         .scan_rate_hz = 216000000,
907         .sample_time_us = 1000,
908         .volt_step_uv = 12500,
909         .delta_pvtm_by_volt = 450,
910         .delta_pvtm_by_temp = 7,
911         .volt_margin_uv = 25000,
912         .min_volt_uv = 850000,
913         .max_volt_uv = 1400000,
914 };
915
916 static struct cpufreq_frequency_table rk3288v2_arm_pvtm_table[] = {
917         {.frequency = 216000,  .index = 5369},
918         {.frequency = 408000,  .index = 6984},
919         {.frequency = 600000,  .index = 8771},
920         {.frequency = 816000,  .index = 11434},
921         {.frequency = 1008000,  .index = 14178},
922         {.frequency = 1200000,  .index = 16797},
923         {.frequency = 1416000,  .index = 20178},
924         {.frequency = 1608000,  .index = 23303},
925         {.frequency = CPUFREQ_TABLE_END, .index = 1},
926 };
927
928 static struct pvtm_info rk3288v2_arm_pvtm_info = {
929         .compatible = "rockchip,rk3288",
930         .pvtm_table = rk3288v2_arm_pvtm_table,
931         .channel = ARM_DVFS_CH,
932         .process_version = RK3288_PROCESS_V2,
933         .scan_rate_hz = 216000000,
934         .sample_time_us = 1000,
935         .volt_step_uv = 12500,
936         .delta_pvtm_by_volt = 430,
937         .delta_pvtm_by_temp = 12,
938         .volt_margin_uv = 25000,
939         .min_volt_uv = 900000,
940         .max_volt_uv = 1400000,
941 };
942
943 static struct pvtm_info *pvtm_info_table[] = {
944         &rk3288v0_arm_pvtm_info,
945         &rk3288v1_arm_pvtm_info,
946         &rk3288v2_arm_pvtm_info
947 };
948
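/*
 * For one dvfs table entry: compute the target PVTM value as
 * min_pvtm + temp * delta_pvtm_by_temp + a margin (volt_margin_uv plus
 * the per-frequency margin in dvfs_node->pvtm_table, converted to PVTM
 * counts via delta_pvtm_by_volt), then pick the lowest scanned voltage
 * whose PVTM reading meets that target.
 */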
949 static int pvtm_set_single_dvfs(struct dvfs_node *dvfs_node, u32 idx,
950                                 struct pvtm_info *info, int *pvtm_list,
951                                 u32 min_pvtm)
952 {
953         struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
954         struct cpufreq_frequency_table *pvtm_table = dvfs_node->pvtm_table;
955         int target_pvtm, pvtm_margin, volt_margin;
956         unsigned int n_voltages = dvfs_node->vd->n_voltages;
957         int *volt_list = dvfs_node->vd->volt_list;
958         int n, temp;
959
960         volt_margin = info->volt_margin_uv + pvtm_table[idx].index;
961         n = volt_margin/info->volt_step_uv;
962         if (volt_margin%info->volt_step_uv)
963                 n++;
964
965         pvtm_margin = n*info->delta_pvtm_by_volt;
966         temp = rockchip_tsadc_get_temp(1);
967         target_pvtm = min_pvtm+temp * info->delta_pvtm_by_temp + pvtm_margin;
968
969         DVFS_DBG("=====%s: temp:%d, freq:%d, target pvtm:%d=====\n",
970                  __func__, temp, dvfs_table[idx].frequency, target_pvtm);
971
972         for (n = 0; n < n_voltages; n++) {
973                 if (pvtm_list[n] >= target_pvtm) {
974                         dvfs_table[idx].index = volt_list[n];
975                         DVFS_DBG("freq[%d]=%d, volt=%d\n",
976                                  idx, dvfs_table[idx].frequency, volt_list[n]);
977
978                         return 0;
979                 }
980         }
981
        return -EINVAL;
985 }
986
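/*
 * Calibrate the dvfs table with PVTM: run the clk at scan_rate_hz, step
 * the regulator through every supported voltage inside
 * [min_volt_uv, max_volt_uv] while sampling the PVTM counter, then let
 * pvtm_set_single_dvfs() choose a voltage for each table frequency.
 */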
987 static void pvtm_set_dvfs_table(struct dvfs_node *dvfs_node)
988 {
989         struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
990         struct pvtm_info *info = dvfs_node->pvtm_info;
991         struct regulator *regulator = dvfs_node->vd->regulator;
992         int i, j;
993         int ret = 0;
994         int pvtm_list[VD_VOL_LIST_CNT] = {0};
995         unsigned int n_voltages = dvfs_node->vd->n_voltages;
996         int *volt_list = dvfs_node->vd->volt_list;
997
998         if (!info)
999                 return;
1000
1001         clk_set_rate(dvfs_node->clk, info->scan_rate_hz);
1002         DVFS_DBG("%s:%lu\n", __func__, clk_get_rate(dvfs_node->clk));
1003
1004         for (i = 0; i < n_voltages; i++) {
1005                 if ((volt_list[i] >= info->min_volt_uv) &&
1006                     (volt_list[i] <= info->max_volt_uv)) {
1007                         regulator_set_voltage(regulator, volt_list[i],
1008                                               volt_list[i]);
1009                         pvtm_list[i] = pvtm_get_value(info->channel,
1010                                                       info->sample_time_us);
1011                 }
1012         }
1013
1014         for (i = 0; dvfs_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1015                 for (j = 0; info->pvtm_table[j].frequency !=
1016                      CPUFREQ_TABLE_END; j++)
1017                         if (info->pvtm_table[j].frequency >=
1018                             dvfs_table[i].frequency) {
1019                                 int min_pvtm = info->pvtm_table[j].index;
1020
1021                                 ret = pvtm_set_single_dvfs(dvfs_node,
1022                                                            i,
1023                                                            info,
1024                                                            pvtm_list,
1025                                                            min_pvtm);
1026                                 break;
1027                         }
1028
1029                 if (ret) {
                        DVFS_WARNING("freq: %d cannot reach target pvtm\n",
1031                                      dvfs_table[i].frequency);
1032                         break;
1033                 }
1034
1035                 if (info->pvtm_table[j].frequency == CPUFREQ_TABLE_END) {
                        DVFS_WARNING("unsupported freq: %d, max supported freq is %d\n",
1037                                      dvfs_table[i].frequency,
1038                                      info->pvtm_table[j-1].frequency);
1039                         break;
1040                 }
1041         }
1042 }
1043
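/*
 * Fallback throttling for rk312x when no valid TSADC temperature is
 * available: estimate how many CPUs are busy from the idle-time delta and
 * apply the matching virt_temp_limit_table, unless the system is in
 * performance status.
 */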
1044 static void dvfs_virt_temp_limit_work_func(void)
1045 {
1046         const struct cpufreq_frequency_table *limits_table = NULL;
1047         unsigned int new_temp_limit_rate = -1;
1048         unsigned int nr_cpus = num_online_cpus();
1049         static bool in_perf;
1050         int i;
1051
1052         if (!cpu_is_rk312x())
1053                 return;
1054
1055         if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
1056                 in_perf = true;
1057         } else if (in_perf) {
1058                 in_perf = false;
1059         } else {
1060                 static u64 last_time_in_idle;
1061                 static u64 last_time_in_idle_timestamp;
1062                 u64 time_in_idle = 0, now;
1063                 u32 delta_idle;
1064                 u32 delta_time;
1065                 unsigned cpu, busy_cpus;
1066
1067                 for_each_online_cpu(cpu) {
1068                         time_in_idle += get_cpu_idle_time_us(cpu, &now);
1069                 }
1070                 delta_time = now - last_time_in_idle_timestamp;
1071                 delta_idle = time_in_idle - last_time_in_idle;
1072                 last_time_in_idle = time_in_idle;
1073                 last_time_in_idle_timestamp = now;
1074                 delta_idle += delta_time >> 4; /* +6.25% */
1075                 if (delta_idle > (nr_cpus - 1)
1076                     * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
1077                         busy_cpus = 1;
1078                 else if (delta_idle > (nr_cpus - 2) * delta_time)
1079                         busy_cpus = 2;
1080                 else if (delta_idle > (nr_cpus - 3) * delta_time)
1081                         busy_cpus = 3;
1082                 else
1083                         busy_cpus = 4;
1084
1085                 limits_table = clk_cpu_dvfs_node->virt_temp_limit_table[busy_cpus-1];
1086                 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
1087                          delta_time, delta_idle, nr_cpus, busy_cpus);
1088         }
1089
1090         if (limits_table) {
1091                 new_temp_limit_rate = limits_table[0].frequency;
1092                 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1093                         if (target_temp >= limits_table[i].index)
1094                                 new_temp_limit_rate = limits_table[i].frequency;
1095                 }
1096         }
1097
1098         if (clk_cpu_dvfs_node->temp_limit_rate != new_temp_limit_rate) {
1099                 clk_cpu_dvfs_node->temp_limit_rate = new_temp_limit_rate;
1100                 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1101                 DVFS_DBG("temp_limit_rate:%d\n", (int)clk_cpu_dvfs_node->temp_limit_rate);
1102         }
1103 }
1104
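/*
 * Periodic (HZ/10) thermal limiter for the CPU clk.  In performance
 * policy the limit comes straight from per_temp_limit_table; in normal
 * policy the limit rate is stepped down or back up by
 * nor_temp_limit_table amounts depending on how far the temperature is
 * from target_temp.  Changes of 1 degree or less are ignored as noise.
 */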
1105 static void dvfs_temp_limit_work_func(struct work_struct *work)
1106 {
1107         int temp=0, delta_temp=0;
1108         unsigned long delay = HZ/10;
1109         unsigned long arm_rate_step=0;
1110         static int old_temp=0;
1111         int i;
1112
1113         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
1114
1115         temp = rockchip_tsadc_get_temp(1);
1116
        if (temp == INVALID_TEMP) {
                dvfs_virt_temp_limit_work_func();
                return;
        }
1119
1120         //debounce
1121         delta_temp = (old_temp>temp) ? (old_temp-temp) : (temp-old_temp);
1122         if (delta_temp <= 1)
1123                 return;
1124
1125         if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
1126                 if (!clk_cpu_dvfs_node->per_temp_limit_table) {
1127                         return;
1128                 }
1129
1130                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1131                 for (i=0; clk_cpu_dvfs_node->per_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1132                         if (temp > clk_cpu_dvfs_node->per_temp_limit_table[i].index) {
1133                                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->per_temp_limit_table[i].frequency;
1134                         }
1135                 }
1136                 dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1137         } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
1138                 if (!clk_cpu_dvfs_node->nor_temp_limit_table) {
1139                         return;
1140                 }
1141
1142                 if (temp > target_temp) {
1143                         if (temp > old_temp) {
1144                                 delta_temp = temp - target_temp;
1145                                 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1146                                         if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
1147                                                 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
1148                                         }
1149                                 }
1150                                 if (arm_rate_step && (clk_cpu_dvfs_node->temp_limit_rate > arm_rate_step)) {
1151                                         clk_cpu_dvfs_node->temp_limit_rate -= arm_rate_step;
1152                                         dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1153                                 }
1154                         }
1155                 } else {
1156                         if (clk_cpu_dvfs_node->temp_limit_rate < clk_cpu_dvfs_node->max_rate) {
1157                                 delta_temp = target_temp - temp;
1158                                 for (i=0; clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1159                                         if (delta_temp > clk_cpu_dvfs_node->nor_temp_limit_table[i].index) {
1160                                                 arm_rate_step = clk_cpu_dvfs_node->nor_temp_limit_table[i].frequency;
1161                                         }
1162                                 }
1163
1164                                 if (arm_rate_step) {
1165                                         clk_cpu_dvfs_node->temp_limit_rate += arm_rate_step;
1166                                         if (clk_cpu_dvfs_node->temp_limit_rate > clk_cpu_dvfs_node->max_rate) {
1167                                                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
1168                                         }
1169                                         dvfs_clk_set_rate(clk_cpu_dvfs_node, clk_cpu_dvfs_node->last_set_rate);
1170                                 }
1171                         }
1172                 }
1173         }
1174
1175         DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n", temp, clk_cpu_dvfs_node->temp_limit_rate);
1176
1177         old_temp = temp;
1178 }
1179 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
1180
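/*
 * Clamp a dvfs clk to [min_rate, max_rate] (in Hz, and only honoured if
 * the values fall inside the table's own range), then immediately
 * re-target the last requested rate so the clamp takes effect.
 *
 * Hypothetical usage sketch from a consumer driver (clk_get_dvfs_node()
 * is assumed to be the lookup helper declared in linux/rockchip/dvfs.h,
 * and "clk_core" is an illustrative node name):
 *
 *     struct dvfs_node *cpu_dvfs = clk_get_dvfs_node("clk_core");
 *     if (cpu_dvfs)
 *             dvfs_clk_enable_limit(cpu_dvfs, 312 * MHz, 1008 * MHz);
 */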
1181 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1182 {
1183         u32 rate = 0, ret = 0;
1184
1185         if (!clk_dvfs_node || (min_rate > max_rate))
1186                 return -EINVAL;
1187         
1188         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1189                 mutex_lock(&clk_dvfs_node->vd->mutex);
1190                 
1191                 /* To reset clk_dvfs_node->min_rate/max_rate */
1192                 dvfs_get_rate_range(clk_dvfs_node);
1193                 clk_dvfs_node->freq_limit_en = 1;
1194
1195                 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1196                         clk_dvfs_node->min_rate = min_rate;
1197                 }
1198                 
1199                 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1200                         clk_dvfs_node->max_rate = max_rate;
1201                 }
1202
1203                 if (clk_dvfs_node->last_set_rate == 0)
1204                         rate = __clk_get_rate(clk_dvfs_node->clk);
1205                 else
1206                         rate = clk_dvfs_node->last_set_rate;
1207                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1208
1209                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1210
1211         }
1212
1213         DVFS_DBG("%s:clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1214                  __func__, __clk_get_name(clk_dvfs_node->clk),
1215                  clk_dvfs_node->last_set_rate,
1216                  clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1217
1218         return 0;
1219 }
1220 EXPORT_SYMBOL(dvfs_clk_enable_limit);
1221
1222 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1223 {
1224         u32 ret = 0;
1225
1226         if (!clk_dvfs_node)
1227                 return -EINVAL;
1228         
1229         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1230                 mutex_lock(&clk_dvfs_node->vd->mutex);
1231                 
1232                 /* To reset clk_dvfs_node->min_rate/max_rate */
1233                 dvfs_get_rate_range(clk_dvfs_node);
1234                 clk_dvfs_node->freq_limit_en = 0;
1235                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1236
1237                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1238         }
1239
1240         DVFS_DBG("%s: clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1241                  __func__, __clk_get_name(clk_dvfs_node->clk),
1242                  clk_dvfs_node->last_set_rate,
1243                  clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1244
1245         return 0;
1246 }
1247 EXPORT_SYMBOL(dvfs_clk_disable_limit);
1248
void dvfs_disable_temp_limit(void)
{
        temp_limit_enable = 0;
        cancel_delayed_work_sync(&dvfs_temp_limit_work);
}
1253
1254 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate) 
1255 {
1256         int freq_limit_en;
1257
1258         if (!clk_dvfs_node)
1259                 return -EINVAL;
1260
1261         mutex_lock(&clk_dvfs_node->vd->mutex);
1262
1263         *min_rate = clk_dvfs_node->min_rate;
1264         *max_rate = clk_dvfs_node->max_rate;
1265         freq_limit_en = clk_dvfs_node->freq_limit_en;
1266
1267         mutex_unlock(&clk_dvfs_node->vd->mutex);
1268
1269         return freq_limit_en;
1270 }
1271 EXPORT_SYMBOL(dvfs_clk_get_limit);
1272
1273 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1274 {
1275         if (!clk_dvfs_node)
1276                 return -EINVAL;
1277                         
1278         mutex_lock(&clk_dvfs_node->vd->mutex);
1279         clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1280         mutex_unlock(&clk_dvfs_node->vd->mutex);
1281
1282         return 0;
1283 }
1284 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
1285
1286 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node) 
1287 {
1288         struct cpufreq_frequency_table *table;
1289
1290         if (!clk_dvfs_node)
1291                 return NULL;
1292
1293         mutex_lock(&clk_dvfs_node->vd->mutex);
1294         table = clk_dvfs_node->dvfs_table;
1295         mutex_unlock(&clk_dvfs_node->vd->mutex);
1296         
1297         return table;
1298 }
1299 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
1300
1301 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1302 {
1303         if (!clk_dvfs_node)
1304                 return -EINVAL;
1305
1306         if (IS_ERR_OR_NULL(table)){
1307                 DVFS_ERR("%s:invalid table!\n", __func__);
1308                 return -EINVAL;
1309         }
1310         
1311         mutex_lock(&clk_dvfs_node->vd->mutex);
1312         clk_dvfs_node->dvfs_table = table;
1313         dvfs_get_rate_range(clk_dvfs_node);
1314         dvfs_table_round_clk_rate(clk_dvfs_node);
1315         dvfs_table_round_volt(clk_dvfs_node);
1316         mutex_unlock(&clk_dvfs_node->vd->mutex);
1317
1318         return 0;
1319 }
1320 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
1321
1322 static int get_adjust_volt_by_leakage(struct dvfs_node *dvfs_node)
1323 {
1324         int leakage = 0;
1325         int delta_leakage = 0;
1326         int i = 0;
1327         int adjust_volt = 0;
1328
1329         if (!dvfs_node->vd)
1330                 return 0;
1331
1332         if (dvfs_node->lkg_info.def_table_lkg == -1)
1333                 return 0;
1334
1335         leakage = rockchip_get_leakage(dvfs_node->channel);
1336         if (!leakage || (leakage == 0xff))
1337                 return 0;
1338
1339         delta_leakage = leakage - dvfs_node->lkg_info.def_table_lkg;
1340         if (delta_leakage <= 0) {
1341                 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1342                         CPUFREQ_TABLE_END); i++) {
1343                         if (leakage > dvfs_node->lkg_info.table[i].lkg) {
1344                                 adjust_volt =
1345                                         dvfs_node->lkg_info.table[i].dlt_volt;
1346                         } else {
1347                                 return adjust_volt;
1348                         }
1349                 }
1350         } else if (delta_leakage > 0) {
1351                 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1352                         CPUFREQ_TABLE_END); i++) {
1353                         if (leakage <= dvfs_node->lkg_info.table[i].lkg) {
1354                                 adjust_volt =
1355                                         -dvfs_node->lkg_info.table[i].dlt_volt;
1356                                 return adjust_volt;
1357                         }
1358                 }
1359         }
1360         return adjust_volt;
1361 }
1362
1363 static void adjust_table_by_leakage(struct dvfs_node *dvfs_node)
1364 {
1365         int i, adjust_volt = get_adjust_volt_by_leakage(dvfs_node);
1366
1367         if (!adjust_volt)
1368                 return;
1369
1370         if (!dvfs_node->dvfs_table)
1371                 return;
1372
1373         if (dvfs_node->lkg_info.min_adjust_freq == -1)
1374                 return;
1375
1376         for (i = 0;
1377         (dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1378                 if (dvfs_node->dvfs_table[i].frequency >=
1379                         dvfs_node->lkg_info.min_adjust_freq)
1380                         dvfs_node->dvfs_table[i].index += adjust_volt;
1381         }
1382 }
1383
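/*
 * Enable dvfs for a clk.  On the first enable of this clk node it grabs
 * the vd's regulator if needed, caches its voltage list and ramp time,
 * rounds the frequency/voltage table, applies leakage and PVTM
 * adjustments when enabled, and raises the supply if the current rate
 * needs a higher voltage than the regulator is presently delivering.
 */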
1384 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1385 {
1386         struct cpufreq_frequency_table clk_fv;
1387         int volt_new;
1388         unsigned int mode;
1389         int ret;
1390
1391         if (!clk_dvfs_node)
1392                 return -EINVAL;
1393         
1394         DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n", 
1395                 __func__, __clk_get_name(clk_dvfs_node->clk));
1396
1397         if (!clk_dvfs_node->vd) {
1398                 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n", 
1399                         __func__, clk_dvfs_node->name);
1400                 return -EINVAL;
1401         }
1402         mutex_lock(&clk_dvfs_node->vd->mutex);
1403         if (clk_dvfs_node->enable_count == 0) {
1404                 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1405                         if (clk_dvfs_node->vd->regulator_name)
1406                                 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1407                         if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1408                                 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1409                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1410                                 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1411                                 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1412                                 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1413                         } else {
1414                                 clk_dvfs_node->enable_count = 0;
1415                                 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n", 
1416                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1417                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1418                                 return -ENXIO;
1419                         }
1420                 } else {
1421                         clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1422                 }
1423                 
1424                 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1425                         __func__, clk_dvfs_node->name, clk_dvfs_node->vd->cur_volt);
1426
1427                 dvfs_table_round_clk_rate(clk_dvfs_node);
1428                 dvfs_get_rate_range(clk_dvfs_node);
1429                 clk_dvfs_node->freq_limit_en = 1;
1430                 if (clk_dvfs_node->lkg_adjust_volt_en)
1431                         adjust_table_by_leakage(clk_dvfs_node);
1432                 if (clk_dvfs_node->support_pvtm)
1433                         pvtm_set_dvfs_table(clk_dvfs_node);
1434                 dvfs_table_round_volt(clk_dvfs_node);
1435                 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1436                 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1437                 
1438                 DVFS_DBG("%s: %s get freq %u!\n", 
1439                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1440
1441                 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1442                         if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1443                                 DVFS_ERR("%s: table empty\n", __func__);
1444                                 clk_dvfs_node->enable_count = 0;
1445                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1446                                 return -EINVAL;
1447                         } else {
1448                                 DVFS_WARNING("%s: clk(%s) all freq table values are smaller than the default (%d), keep the default and just enable dvfs\n",
1449                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1450                                 clk_dvfs_node->enable_count++;
1451                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1452                                 return 0;
1453                         }
1454                 }
1455                 clk_dvfs_node->enable_count++;
1456                 clk_dvfs_node->set_volt = clk_fv.index;
1457                 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1458                 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1459                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1460 #if 0
1461                 if (clk_dvfs_node->dvfs_nb) {
1462                         /* must be unregistered when the clk is disabled */
1463                         clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
1464                 }
1465 #endif
1466                 if (clk_dvfs_node->vd->cur_volt != volt_new) {
1467                         ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1468                         dvfs_volt_up_delay(clk_dvfs_node->vd, volt_new, clk_dvfs_node->vd->cur_volt);
1469                         if (ret < 0) {
1470                                 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1471                                 clk_dvfs_node->enable_count = 0;
1472                                 DVFS_ERR("dvfs enable clk %s, set volt error\n", clk_dvfs_node->name);
1473                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1474                                 return -EAGAIN;
1475                         }
1476                         clk_dvfs_node->vd->cur_volt = volt_new;
1477                         clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1478                 }
1479
1480         } else {
1481                 DVFS_DBG("%s: dvfs already enable clk enable = %d!\n",
1482                         __func__, clk_dvfs_node->enable_count);
1483                 clk_dvfs_node->enable_count++;
1484         }
1485
1486         if (clk_dvfs_node->regu_mode_en) {
1487                 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1488                 if (ret) {
1489                         DVFS_ERR("%s: clk(%s) regu_mode_table is invalid, set regu_mode_en=0!\n",
1490                                         __func__, clk_dvfs_node->name);
1491                         clk_dvfs_node->regu_mode_en = 0;
1492                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1493                         return ret;
1494                 }
1495
1496                 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1497                 if (ret < 0) {
1498                         DVFS_ERR("%s: clk(%s) rate %d kHz get regu_mode failed\n",
1499                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1500                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1501                         return ret;
1502                 } else
1503                         clk_dvfs_node->regu_mode = mode;
1504
1505                 dvfs_update_clk_pds_mode(clk_dvfs_node);
1506         }
1507
1508         mutex_unlock(&clk_dvfs_node->vd->mutex);
1509         
1510         return 0;
1511 }
1512 EXPORT_SYMBOL(clk_enable_dvfs);
1513
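     /*
      * clk_disable_dvfs() - drop one dvfs enable reference.
      *
      * When the reference count reaches zero the domain voltage is
      * re-evaluated from the remaining enabled clocks and set directly.
      */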
1514 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1515 {
1516         int volt_new;
1517
1518         if (!clk_dvfs_node)
1519                 return -EINVAL;
1520
1521         DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n", 
1522                 __func__, __clk_get_name(clk_dvfs_node->clk));
1523
1524         mutex_lock(&clk_dvfs_node->vd->mutex);
1525         if (!clk_dvfs_node->enable_count) {
1526                 DVFS_WARNING("%s:clk(%s) is already closed!\n", 
1527                         __func__, __clk_get_name(clk_dvfs_node->clk));
1528                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1529                 return 0;
1530         } else {
1531                 clk_dvfs_node->enable_count--;
1532                 if (clk_dvfs_node->enable_count == 0) {
1533                         DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1534                                 __func__, __clk_get_name(clk_dvfs_node->clk));
1535                         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1536                         dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1537
1538 #if 0
1539                         clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1540                         DVFS_DBG("clk unregister nb!\n");
1541 #endif
1542                 }
1543         }
1544         mutex_unlock(&clk_dvfs_node->vd->mutex);
1545         return 0;
1546 }
1547 EXPORT_SYMBOL(clk_disable_dvfs);
1548
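     /*
      * Clamp the requested rate to the [min_rate, max_rate] range taken from
      * the dvfs table (when freq_limit_en is set), then apply the thermal
      * ceiling in temp_limit_rate when temperature limiting is enabled.
      */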
1549 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1550 {
1551         unsigned long limit_rate;
1552
1553         limit_rate = rate;
1554         if (clk_dvfs_node->freq_limit_en) {
1555                 /* dvfs table limit */
1556                 if (rate < clk_dvfs_node->min_rate) {
1557                         limit_rate = clk_dvfs_node->min_rate;
1558                 } else if (rate > clk_dvfs_node->max_rate) {
1559                         limit_rate = clk_dvfs_node->max_rate;
1560                 }
1561                 if (temp_limit_enable) {
1562                         if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1563                                 limit_rate = clk_dvfs_node->temp_limit_rate;
1564                         }
1565                 }
1566         }
1567
1568         DVFS_DBG("%s: rate:%lu, limit_rate:%lu\n", __func__, rate, limit_rate);
1569
1570         return limit_rate;
1571 }
1572
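     /*
      * dvfs_target() - voltage-domain set_rate callback (vd_dvfs_target).
      *
      * Ordering: when the rate goes up, the regulator mode and the domain
      * voltage are raised before the clock rate; when the rate goes down,
      * the clock rate is changed first and the voltage/mode are lowered
      * afterwards.  If the rate change fails, the previously requested
      * voltage is restored via fail_roll_back.
      */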
1573 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1574 {
1575         struct cpufreq_frequency_table clk_fv;
1576         unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1577         struct clk *clk = clk_dvfs_node->clk;
1578         int ret;
1579
1580         if (!clk)
1581                 return -EINVAL;
1582
1583         if (!clk_dvfs_node->enable_count)
1584                 return 0;
1585         
1586         if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1587                 /* It means the last time set voltage error */
1588                 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1589                 if (ret < 0) {
1590                         return -EAGAIN;
1591                 }
1592         }
1593
1594         rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1595         new_rate = __clk_round_rate(clk, rate);
1596         old_rate = __clk_get_rate(clk);
1597         if (new_rate == old_rate)
1598                 return 0;
1599
1600         DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1601                 __func__, clk_dvfs_node->name, new_rate, old_rate);
1602
1603         /* find the clk corresponding voltage */
1604         ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1605         if (ret) {
1606                 DVFS_ERR("%s:dvfs clk(%s) rate %lu Hz is not supported\n",
1607                         __func__, clk_dvfs_node->name, new_rate);
1608                 return ret;
1609         }
1610         clk_volt_store = clk_dvfs_node->set_volt;
1611         clk_dvfs_node->set_volt = clk_fv.index;
1612         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1613         DVFS_DBG("%s: %s new rate=%lu (was %lu), new volt=%lu (was %d)\n",
1614                 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new, clk_dvfs_node->vd->cur_volt);
1615
1616
1617         /* if up the rate */
1618         if (new_rate > old_rate) {
1619                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1620                 if (ret)
1621                         DVFS_ERR("%s: dvfs clk(%s) rate %lu Hz set mode err\n",
1622                                 __func__, clk_dvfs_node->name, new_rate);
1623
1624                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1625                 if (ret)
1626                         goto fail_roll_back;
1627         }
1628
1629         /* scale rate */
1630         if (clk_dvfs_node->clk_dvfs_target) {
1631                 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1632         } else {
1633                 ret = clk_set_rate(clk, rate);
1634         }
1635
1636         if (ret) {
1637                 DVFS_ERR("%s:clk(%s) set rate err\n", 
1638                         __func__, __clk_get_name(clk));
1639                 goto fail_roll_back;
1640         }
1641         clk_dvfs_node->set_freq = new_rate / 1000;
1642
1643         DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n", 
1644                 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1645
1646         /* if down the rate */
1647         if (new_rate < old_rate) {
1648                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1649                 if (ret)
1650                         goto out;
1651
1652                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1653                 if (ret)
1654                         DVFS_ERR("%s:dvfs clk(%s) rate %lu Hz set mode err\n",
1655                                 __func__, clk_dvfs_node->name, new_rate);
1656         }
1657
1658         return 0;
1659 fail_roll_back:
1660         clk_dvfs_node->set_volt = clk_volt_store;
1661 out:
1662         return ret;
1663 }
1664
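     /*
      * The helpers below are thin wrappers that forward to the common clock
      * framework for the clock behind a dvfs_node; they do not touch the
      * voltage domain.  Only dvfs_clk_set_rate() goes through the domain's
      * vd_dvfs_target callback.
      */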
1665 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1666 {
1667         return __clk_round_rate(clk_dvfs_node->clk, rate);
1668 }
1669 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1670
1671 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1672 {
1673         return __clk_get_rate(clk_dvfs_node->clk);
1674 }
1675 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1676
1677 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1678 {
1679         unsigned long last_set_rate;
1680
1681         mutex_lock(&clk_dvfs_node->vd->mutex);
1682         last_set_rate = clk_dvfs_node->last_set_rate;
1683         mutex_unlock(&clk_dvfs_node->vd->mutex);
1684
1685         return last_set_rate;
1686 }
1687 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1688
1689
1690 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1691 {
1692         return clk_enable(clk_dvfs_node->clk);
1693 }
1694 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1695
1696 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1697 {
1698         return clk_disable(clk_dvfs_node->clk);
1699 }
1700 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1701
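     /*
      * Look up a dvfs node by clock name by walking the global rk_dvfs_tree
      * (voltage domain -> power domain -> clock) under the proper locks.
      * Returns NULL when no node matches; clk_put_dvfs_node() is a no-op.
      */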
1702 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1703 {
1704         struct vd_node *vd;
1705         struct pd_node *pd;
1706         struct dvfs_node *clk_dvfs_node;
1707
1708         mutex_lock(&rk_dvfs_mutex);
1709         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1710                 mutex_lock(&vd->mutex);
1711                 list_for_each_entry(pd, &vd->pd_list, node) {
1712                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1713                                 if (strcmp(clk_dvfs_node->name, clk_name) == 0) {
1714                                         mutex_unlock(&vd->mutex);
1715                                         mutex_unlock(&rk_dvfs_mutex);
1716                                         return clk_dvfs_node;
1717                                 }
1718                         }
1719                 }
1720                 mutex_unlock(&vd->mutex);
1721         }
1722         mutex_unlock(&rk_dvfs_mutex);
1723         
1724         return NULL;    
1725 }
1726 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
1727
1728 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1729 {
1730         return;
1731 }
1732 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1733
1734 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1735 {
1736         return clk_prepare_enable(clk_dvfs_node->clk);
1737 }
1738 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1739
1740
1741 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1742 {
1743         clk_disable_unprepare(clk_dvfs_node->clk);
1744 }
1745 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
1746
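     /*
      * Typical consumer usage, shown as a minimal sketch (not taken from this
      * file; "clk_core" is the node name dvfs_init() uses below, the rate is
      * an arbitrary example and error handling is omitted):
      *
      *      struct dvfs_node *node = clk_get_dvfs_node("clk_core");
      *
      *      clk_enable_dvfs(node);
      *      dvfs_clk_prepare_enable(node);
      *      dvfs_clk_set_rate(node, 816 * MHz);
      */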
1747 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1748 {
1749         int ret = -EINVAL;
1750         
1751         if (!clk_dvfs_node)
1752                 return -EINVAL;
1753         
1754         DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n", 
1755                 __func__, clk_dvfs_node->name, rate);
1756         
1757         #if 0 // judge by reference func in rk
1758         if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1759                 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1760                 return ret;
1761         }
1762         #endif
1763
1764         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1765                 mutex_lock(&clk_dvfs_node->vd->mutex);
1766                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1767                 clk_dvfs_node->last_set_rate = rate;
1768                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1769         } else {
1770                 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n", 
1771                         __func__, clk_dvfs_node->name);
1772         }
1773                 
1774         return ret;     
1775 }
1776 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
1777
1778
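     /*
      * Registration helpers: rk_regist_vd(), rk_regist_pd() and rk_regist_clk()
      * link nodes into the global rk_dvfs_tree in the order voltage domain ->
      * power domain -> clock.  of_dvfs_init() below builds the tree this way
      * from the device tree.
      */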
1779 int rk_regist_vd(struct vd_node *vd)
1780 {
1781         if (!vd)
1782                 return -EINVAL;
1783
1784         vd->mode_flag = 0;
1785         vd->volt_time_flag = 0;
1786         vd->n_voltages = 0;
1787         INIT_LIST_HEAD(&vd->pd_list);
1788         mutex_lock(&rk_dvfs_mutex);
1789         list_add(&vd->node, &rk_dvfs_tree);
1790         mutex_unlock(&rk_dvfs_mutex);
1791
1792         return 0;
1793 }
1794 EXPORT_SYMBOL_GPL(rk_regist_vd);
1795
1796 int rk_regist_pd(struct pd_node *pd)
1797 {
1798         struct vd_node  *vd;
1799
1800         if (!pd)
1801                 return -EINVAL;
1802
1803         vd = pd->vd;
1804         if (!vd)
1805                 return -EINVAL;
1806
1807         INIT_LIST_HEAD(&pd->clk_list);
1808         mutex_lock(&vd->mutex);
1809         list_add(&pd->node, &vd->pd_list);
1810         mutex_unlock(&vd->mutex);
1811         
1812         return 0;
1813 }
1814 EXPORT_SYMBOL_GPL(rk_regist_pd);
1815
1816 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1817 {
1818         struct vd_node  *vd;
1819         struct pd_node  *pd;
1820
1821         if (!clk_dvfs_node)
1822                 return -EINVAL;
1823
1824         vd = clk_dvfs_node->vd;
1825         pd = clk_dvfs_node->pd;
1826         if (!vd || !pd)
1827                 return -EINVAL;
1828
1829         mutex_lock(&vd->mutex);
1830         list_add(&clk_dvfs_node->node, &pd->clk_list);
1831         mutex_unlock(&vd->mutex);
1832         
1833         return 0;
1834 }
1835 EXPORT_SYMBOL_GPL(rk_regist_clk);
1836
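     /*
      * Parse @propname as a list of <value freq> u32 pairs: the first cell is
      * stored in .index and the second, multiplied by 1000, in .frequency.
      * The table is terminated with CPUFREQ_TABLE_END.  Returns NULL if the
      * property is missing or malformed.
      */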
1837 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1838 {
1839         struct cpufreq_frequency_table *temp_limt_table = NULL;
1840         const struct property *prop;
1841         const __be32 *val;
1842         int nr, i;
1843
1844         prop = of_find_property(dev_node, propname, NULL);
1845         if (!prop)
1846                 return NULL;
1847         if (!prop->value)
1848                 return NULL;
1849
1850         nr = prop->length / sizeof(u32);
1851         if (nr % 2) {
1852                 pr_err("%s: Invalid freq list\n", __func__);
1853                 return NULL;
1854         }
1855
1856         temp_limt_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1857                              (nr/2 + 1), GFP_KERNEL);
             if (!temp_limt_table)
                     return NULL;
1858
1859         val = prop->value;
1860
1861         for (i = 0; i < nr/2; i++) {
1862                 temp_limt_table[i].index = be32_to_cpup(val++);
1863                 temp_limt_table[i].frequency = be32_to_cpup(val++) * 1000;
1864         }
1865
1866         temp_limt_table[i].index = 0;
1867         temp_limt_table[i].frequency = CPUFREQ_TABLE_END;
1868
1869         return temp_limt_table;
1870
1871 }
1872
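     /*
      * Parse the legacy "operating-points" property as <frequency voltage>
      * pairs: frequency goes to .frequency and voltage to .index.  A sketch
      * of the expected layout (values are hypothetical; the standard
      * operating-points binding uses kHz and microvolts):
      *
      *      operating-points = <
      *              312000  950000
      *              816000 1000000
      *      >;
      */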
1873 static int of_get_dvfs_table(struct device_node *dev_node,
1874                              struct cpufreq_frequency_table **dvfs_table)
1875 {
1876         struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1877         const struct property *prop;
1878         const __be32 *val;
1879         int nr, i;
1880
1881         prop = of_find_property(dev_node, "operating-points", NULL);
1882         if (!prop)
1883                 return -EINVAL;
1884         if (!prop->value)
1885                 return -EINVAL;
1886
1887         nr = prop->length / sizeof(u32);
1888         if (nr % 2) {
1889                 pr_err("%s: Invalid freq list\n", __func__);
1890                 return -EINVAL;
1891         }
1892
1893         tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1894                              (nr/2 + 1), GFP_KERNEL);
             if (!tmp_dvfs_table)
                     return -ENOMEM;
1895         val = prop->value;
1896
1897         for (i = 0; i < nr/2; i++) {
1898                 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
1899                 tmp_dvfs_table[i].index = be32_to_cpup(val++);
1900         }
1901
1902         tmp_dvfs_table[i].index = 0;
1903         tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1904
1905         *dvfs_table = tmp_dvfs_table;
1906
1907         return 0;
1908 }
1909
1910
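     /*
      * Parse "pvtm-operating-points" as <frequency voltage pvtm> triplets into
      * two parallel tables that share the frequency column: the dvfs table
      * carries the voltage in .index, the pvtm table carries the pvtm value.
      */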
1911 static int of_get_dvfs_pvtm_table(struct device_node *dev_node,
1912                                   struct cpufreq_frequency_table **dvfs_table,
1913                                   struct cpufreq_frequency_table **pvtm_table)
1914 {
1915         struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1916         struct cpufreq_frequency_table *tmp_pvtm_table = NULL;
1917         const struct property *prop;
1918         const __be32 *val;
1919         int nr, i;
1920
1921         prop = of_find_property(dev_node, "pvtm-operating-points", NULL);
1922         if (!prop)
1923                 return -EINVAL;
1924         if (!prop->value)
1925                 return -EINVAL;
1926
1927         nr = prop->length / sizeof(u32);
1928         if (nr % 3) {
1929                 pr_err("%s: Invalid freq list\n", __func__);
1930                 return -EINVAL;
1931         }
1932
1933         tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1934                              (nr/3 + 1), GFP_KERNEL);
1935
1936         tmp_pvtm_table = kzalloc(sizeof(*tmp_pvtm_table) *
1937                              (nr/3 + 1), GFP_KERNEL);
             if (!tmp_dvfs_table || !tmp_pvtm_table) {
                     kfree(tmp_dvfs_table);
                     kfree(tmp_pvtm_table);
                     return -ENOMEM;
             }
1938
1939         val = prop->value;
1940
1941         for (i = 0; i < nr/3; i++) {
1942                 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
1943                 tmp_dvfs_table[i].index = be32_to_cpup(val++);
1944
1945                 tmp_pvtm_table[i].frequency = tmp_dvfs_table[i].frequency;
1946                 tmp_pvtm_table[i].index = be32_to_cpup(val++);
1947         }
1948
1949         tmp_dvfs_table[i].index = 0;
1950         tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1951
1952         tmp_pvtm_table[i].index = 0;
1953         tmp_pvtm_table[i].frequency = CPUFREQ_TABLE_END;
1954
1955         *dvfs_table = tmp_dvfs_table;
1956         *pvtm_table = tmp_pvtm_table;
1957
1958         return 0;
1959 }
1960
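     /*
      * Parse @propname as <leakage delta_volt> pairs for leakage-based voltage
      * adjustment; the terminating entry has dlt_volt = CPUFREQ_TABLE_END.
      * Consumed by get_adjust_volt_by_leakage()/adjust_table_by_leakage().
      */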
1961 static struct lkg_adjust_volt_table
1962         *of_get_lkg_adjust_volt_table(struct device_node *np,
1963         const char *propname)
1964 {
1965         struct lkg_adjust_volt_table *lkg_adjust_volt_table = NULL;
1966         const struct property *prop;
1967         const __be32 *val;
1968         int nr, i;
1969
1970         prop = of_find_property(np, propname, NULL);
1971         if (!prop)
1972                 return NULL;
1973         if (!prop->value)
1974                 return NULL;
1975
1976         nr = prop->length / sizeof(s32);
1977         if (nr % 2) {
1978                 pr_err("%s: Invalid lkg_adjust_volt table\n", __func__);
1979                 return NULL;
1980         }
1981
1982         lkg_adjust_volt_table =
1983                 kzalloc(sizeof(struct lkg_adjust_volt_table) *
1984                 (nr/2 + 1), GFP_KERNEL);
             if (!lkg_adjust_volt_table)
                     return NULL;
1985
1986         val = prop->value;
1987
1988         for (i = 0; i < nr/2; i++) {
1989                 lkg_adjust_volt_table[i].lkg = be32_to_cpup(val++);
1990                 lkg_adjust_volt_table[i].dlt_volt = be32_to_cpup(val++);
1991         }
1992
1993         lkg_adjust_volt_table[i].lkg = 0;
1994         lkg_adjust_volt_table[i].dlt_volt = CPUFREQ_TABLE_END;
1995
1996         return lkg_adjust_volt_table;
1997 }
1998
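     /*
      * Per-clock device tree parsing.  Properties read from the clock node:
      * "channel", "regu-mode-en" (plus the regu mode table),
      * "temp-limit-enable" with "target-temp" and the normal/performance/virt
      * temperature limit tables, "support-pvtm" (then "pvtm-operating-points")
      * or plain "operating-points", and "lkg_adjust_volt_en" with
      * "def_table_lkg", "min_adjust_freq" and "lkg_adjust_volt_table".
      */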
1999 static int dvfs_node_parse_dt(struct device_node *np,
2000                               struct dvfs_node *dvfs_node)
2001 {
2002         int process_version = rockchip_process_version();
2003         int i = 0;
2004         int ret;
2005
2006         of_property_read_u32_index(np, "channel", 0, &dvfs_node->channel);
2007
2008         pr_info("channel:%d, lkg:%d\n",
2009                 dvfs_node->channel, rockchip_get_leakage(dvfs_node->channel));
2010
2011         of_property_read_u32_index(np, "regu-mode-en", 0,
2012                                    &dvfs_node->regu_mode_en);
2013         if (dvfs_node->regu_mode_en)
2014                 dvfs_node->regu_mode_table = of_get_regu_mode_table(np);
2015         else
2016                 dvfs_node->regu_mode_table = NULL;
2017
2018         of_property_read_u32_index(np, "temp-limit-enable", 0,
2019                                    &temp_limit_enable);
2020         if (temp_limit_enable) {
2021                 of_property_read_u32_index(np, "target-temp", 0, &target_temp);
2022                 pr_info("target-temp:%d\n", target_temp);
2023                 dvfs_node->nor_temp_limit_table =
2024                         of_get_temp_limit_table(np,
2025                                                 "normal-temp-limit");
2026                 dvfs_node->per_temp_limit_table =
2027                         of_get_temp_limit_table(np,
2028                                                 "performance-temp-limit");
2029                 dvfs_node->virt_temp_limit_table[0] =
2030                         of_get_temp_limit_table(np,
2031                                                 "virt-temp-limit-1-cpu-busy");
2032                 dvfs_node->virt_temp_limit_table[1] =
2033                         of_get_temp_limit_table(np,
2034                                                 "virt-temp-limit-2-cpu-busy");
2035                 dvfs_node->virt_temp_limit_table[2] =
2036                         of_get_temp_limit_table(np,
2037                                                 "virt-temp-limit-3-cpu-busy");
2038                 dvfs_node->virt_temp_limit_table[3] =
2039                         of_get_temp_limit_table(np,
2040                                                 "virt-temp-limit-4-cpu-busy");
2041         }
2042         dvfs_node->temp_limit_rate = -1;
2043
2044         ret = of_property_read_u32_index(np, "support-pvtm", 0,
2045                                          &dvfs_node->support_pvtm);
2046         if (!ret) {
2047                 if (of_get_dvfs_pvtm_table(np, &dvfs_node->dvfs_table,
2048                                            &dvfs_node->pvtm_table))
2049                         return -EINVAL;
2050
2051                 for (i = 0; i < ARRAY_SIZE(pvtm_info_table); i++) {
2052                         struct pvtm_info *pvtm_info = pvtm_info_table[i];
2053
2054                         if ((pvtm_info->channel == dvfs_node->channel) &&
2055                             (pvtm_info->process_version == process_version) &&
2056                              of_machine_is_compatible(pvtm_info->compatible)) {
2057                                 dvfs_node->pvtm_info = pvtm_info;
2058                                 break;
2059                         }
2060                 }
2061
2062                 if (!dvfs_node->pvtm_info)
2063                         dvfs_node->support_pvtm = 0;
2064         } else {
2065                 if (of_get_dvfs_table(np, &dvfs_node->dvfs_table))
2066                         return -EINVAL;
2067         }
2068
2069         of_property_read_u32_index(np, "lkg_adjust_volt_en", 0,
2070                                    &dvfs_node->lkg_adjust_volt_en);
2071         if (dvfs_node->lkg_adjust_volt_en) {
2072                 dvfs_node->lkg_info.def_table_lkg = -1;
2073                 of_property_read_u32_index(np, "def_table_lkg", 0,
2074                                            &dvfs_node->lkg_info.def_table_lkg);
2075
2076                 dvfs_node->lkg_info.min_adjust_freq = -1;
2077                 of_property_read_u32_index(np, "min_adjust_freq", 0,
2078                                            &dvfs_node->lkg_info.min_adjust_freq
2079                                            );
2080
2081                 dvfs_node->lkg_info.table =
2082                         of_get_lkg_adjust_volt_table(np,
2083                                                      "lkg_adjust_volt_table");
2084         }
2085
2086         return 0;
2087 }
2088
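     /*
      * Build the dvfs tree from the device tree.  The expected layout is a
      * "dvfs" node containing voltage domain children (each with a
      * "regulator_name" property), which contain power domain children, which
      * in turn contain one child per clock; each clock node name must match a
      * clock known to clk_get().  A minimal sketch (the vd/pd node names and
      * the operating point are hypothetical, "clk_core" matches the name used
      * by dvfs_init()):
      *
      *      dvfs {
      *              vd_arm {
      *                      regulator_name = "vdd_arm";
      *                      pd_core {
      *                              clk_core {
      *                                      operating-points = <816000 1000000>;
      *                              };
      *                      };
      *              };
      *      };
      */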
2089 int of_dvfs_init(void)
2090 {
2091         struct vd_node *vd;
2092         struct pd_node *pd;
2093         struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
2094         struct dvfs_node *dvfs_node;
2095         struct clk *clk;
2096         int ret;
2097
2098         DVFS_DBG("%s\n", __func__);
2099         pr_info("process version: %d\n", rockchip_process_version());
2100
2101         dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
2102         if (!dvfs_dev_node) {
2103                 DVFS_ERR("%s: get dvfs dev node err\n", __func__);
2104                 return -ENODEV;
2105         }
2106
2107         for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
2108                 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
2109                 if (!vd)
2110                         return -ENOMEM;
2111
2112                 mutex_init(&vd->mutex);
2113                 vd->name = vd_dev_node->name;
2114                 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
2115                 if (ret) {
2116                         DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n", 
2117                                 __func__, vd_dev_node->name, ret);
2118                         kfree(vd);
2119                         continue;
2120                 }
2121                 
2122                 vd->suspend_volt = 0;
2123                 
2124                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
2125                 vd->vd_dvfs_target = dvfs_target;
2126                 ret = rk_regist_vd(vd);
2127                 if (ret){
2128                         DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
2129                         kfree(vd);
2130                         continue;
2131                 }
2132
2133                 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n", 
2134                         __func__, vd->name, vd->regulator_name, vd->suspend_volt);
2135                 
2136                 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {            
2137                         pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
2138                         if (!pd)
2139                                 return -ENOMEM;
2140
2141                         pd->vd = vd;
2142                         pd->name = pd_dev_node->name;
2143                         
2144                         ret = rk_regist_pd(pd);
2145                         if (ret){
2146                                 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
2147                                 kfree(pd);
2148                                 continue;
2149                         }
2150                         DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n", 
2151                                 __func__, pd->name, vd->name);                  
2152                         for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
2153                                 if (!of_device_is_available(clk_dev_node))
2154                                         continue;
2155                                 
2156                                 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
2157                                 if (!dvfs_node)
2158                                         return -ENOMEM;
2159                                 
2160                                 dvfs_node->name = clk_dev_node->name;
2161                                 dvfs_node->pd = pd;
2162                                 dvfs_node->vd = vd;
2163
2164                                 if (dvfs_node_parse_dt(clk_dev_node, dvfs_node)) {
                                             kfree(dvfs_node);
2165                                         continue;
                                     }
2166                                 
2167                                 clk = clk_get(NULL, clk_dev_node->name);
2168                                 if (IS_ERR(clk)) {
2169                                         DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
2170                                         kfree(dvfs_node);
2171                                         continue;
2172                                         
2173                                 }
2174                                 
2175                                 dvfs_node->clk = clk;
2176                                 ret = rk_regist_clk(dvfs_node);
2177                                 if (ret){
2178                                         DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
2179                                         return ret;
2180                                 }
2181
2182                                 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n", 
2183                                         __func__, clk_dev_node->name, pd->name);        
2184
2185                         }
2186                 }       
2187         }
2188         return 0;
2189 }
2190
2191 #ifdef CONFIG_ARM64
2192 arch_initcall_sync(of_dvfs_init);
2193 #endif
2194
2195 /*********************************************************************************/
2196 /**
2197  * dump_dbg_map() - dump all information about the dvfs tree for debugging
2198  */
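     /*
      * Note: the dump goes to the kernel log through printk(); @buf is never
      * written and the returned length is always 0, so reading the dvfs_tree
      * sysfs file only triggers the log output.
      */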
2199 static int dump_dbg_map(char *buf)
2200 {
2201         int i;
2202         struct vd_node  *vd;
2203         struct pd_node  *pd;
2204         struct dvfs_node        *clk_dvfs_node;
2205         char *s = buf;
2206         
2207         mutex_lock(&rk_dvfs_mutex);
2208         printk( "-------------DVFS TREE-----------\n\n\n");
2209         printk( "DVFS TREE:\n");
2210
2211         list_for_each_entry(vd, &rk_dvfs_tree, node) {
2212                 mutex_lock(&vd->mutex);
2213                 printk( "|\n|- voltage domain:%s\n", vd->name);
2214                 printk( "|- current voltage:%d\n", vd->cur_volt);
2215                 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
2216
2217                 list_for_each_entry(pd, &vd->pd_list, node) {
2218                         printk( "|  |\n|  |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
2219                                         pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
2220                                         dvfs_regu_mode_to_string(pd->regu_mode));
2221
2222                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
2223                                 printk( "|  |  |\n|  |  |- clock: %s current: rate %d, volt = %d,"
2224                                                 " enable_dvfs = %s\n",
2225                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
2226                                                 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
2227                                 printk( "|  |  |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
2228                                                 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
2229                                                 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
2230                                                 clk_dvfs_node->last_set_rate/1000);
2231                                 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2232                                         printk( "|  |  |  |- freq = %d, volt = %d\n",
2233                                                         clk_dvfs_node->dvfs_table[i].frequency,
2234                                                         clk_dvfs_node->dvfs_table[i].index);
2235
2236                                 }
2237                                 printk( "|  |  |- clock: %s current: rate %d, regu_mode = %s,"
2238                                                 " regu_mode_en = %d\n",
2239                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq,
2240                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
2241                                                 clk_dvfs_node->regu_mode_en);
2242                                 if (clk_dvfs_node->regu_mode_table) {
2243                                         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2244                                                 printk( "|  |  |  |- freq = %d, regu_mode = %s\n",
2245                                                                 clk_dvfs_node->regu_mode_table[i].frequency/1000,
2246                                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
2247                                         }
2248                                 }
2249                         }
2250                 }
2251                 mutex_unlock(&vd->mutex);
2252         }
2253         
2254         printk( "-------------DVFS TREE END------------\n");
2255         mutex_unlock(&rk_dvfs_mutex);
2256         
2257         return s - buf;
2258 }
2259
2260 /*********************************************************************************/
2261 static struct kobject *dvfs_kobj;
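     /*
      * struct dvfs_attribute mirrors the layout of struct kobj_attribute
      * (attribute, show, store), so the default sysfs ops installed by
      * kobject_create_and_add() can dispatch dvfs_tree_show()/dvfs_tree_store()
      * through the kobj_attribute callbacks.
      */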
2262 struct dvfs_attribute {
2263         struct attribute        attr;
2264         ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
2265                         char *buf);
2266         ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
2267                         const char *buf, size_t n);
2268 };
2269
2270 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
2271                const char *buf, size_t n)
2272 {
2273        return n;
2274 }
2275 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
2276                char *buf)
2277 {
2278        return dump_dbg_map(buf);
2279 }
2280
2281
2282 static struct dvfs_attribute dvfs_attrs[] = {
2283         /*     node_name        permission              show_func       store_func */
2284 //#ifdef CONFIG_RK_CLOCK_PROC
2285         __ATTR(dvfs_tree,       S_IRUSR | S_IRGRP | S_IWUSR,    dvfs_tree_show, dvfs_tree_store),
2286 //#endif
2287 };
2288
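     /*
      * Late init: create the /sys/dvfs/dvfs_tree interface, start the
      * temperature limit work on CPU0 when temperature limiting is enabled in
      * the device tree, and, when a vdd_gpu regulator exists, hook the pd_gpu
      * power domain notifier plus the framebuffer (early suspend) and reboot
      * notifiers.
      */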
2289 static int __init dvfs_init(void)
2290 {
2291         int i, ret = 0;
2292
2293         dvfs_kobj = kobject_create_and_add("dvfs", NULL);
2294         if (!dvfs_kobj)
2295                 return -ENOMEM;
2296         for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
2297                 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
2298                 if (ret != 0) {
2299                         DVFS_ERR("create index %d error\n", i);
2300                         return ret;
2301                 }
2302         }
2303
2304         if (temp_limit_enable) {
2305                 clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
2306                 if (!clk_cpu_dvfs_node){
2307                         return -EINVAL;
2308                 }
2309
2310                 clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
2311                 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
2312                 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
2313         }
2314
2315         vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
2316         if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
2317                 struct clk *clk = clk_get(NULL, "pd_gpu");
2318
2319                 if (!IS_ERR_OR_NULL(clk))
2320                         rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
2321
2322                 fb_register_client(&early_suspend_notifier);
2323                 register_reboot_notifier(&vdd_gpu_reboot_notifier);
2324         }
2325
2326         return ret;
2327 }
2328
2329 late_initcall(dvfs_init);