dvfs: add gpu temperature control
[firefly-linux-kernel-4.4.55.git] / arch/arm/mach-rockchip/dvfs.c
1 /* arch/arm/mach-rockchip/dvfs.c
2  *
3  * Copyright (C) 2012 ROCKCHIP, Inc.
4  *
5  * This software is licensed under the terms of the GNU General Public
6  * License version 2, as published by the Free Software Foundation, and
7  * may be copied, distributed, and modified under those terms.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/slab.h>
16 #include <linux/clk.h>
17 #include <linux/regulator/consumer.h>
18 #include <linux/delay.h>
19 #include <linux/stat.h>
20 #include <linux/of.h>
21 #include <linux/opp.h>
22 #include <linux/rockchip/dvfs.h>
23 #include <linux/rockchip/common.h>
24 #include <linux/fb.h>
25 #include <linux/reboot.h>
26 #include <linux/rockchip/cpu.h>
27 #include <linux/tick.h>
28 #include <dt-bindings/clock/rk_system_status.h>
29 #include "../../../drivers/clk/rockchip/clk-pd.h"
30 #include "efuse.h"
31
32 #define MHz     (1000 * 1000)
33 static LIST_HEAD(rk_dvfs_tree);
34 static DEFINE_MUTEX(rk_dvfs_mutex);
35 static struct workqueue_struct *dvfs_wq;
36 static struct dvfs_node *clk_cpu_dvfs_node;
37 static struct dvfs_node *clk_gpu_dvfs_node;
38 static int pd_gpu_off, early_suspend;
39 static DEFINE_MUTEX(switch_vdd_gpu_mutex);
40 struct regulator *vdd_gpu_regulator;
41
42 static int vdd_gpu_reboot_notifier_event(struct notifier_block *this,
43         unsigned long event, void *ptr)
44 {
45         int ret;
46
47         DVFS_DBG("%s: enable vdd_gpu\n", __func__);
48         mutex_lock(&switch_vdd_gpu_mutex);
49         if (!regulator_is_enabled(vdd_gpu_regulator))
50                 ret = regulator_enable(vdd_gpu_regulator);
51         mutex_unlock(&switch_vdd_gpu_mutex);
52
53         return NOTIFY_OK;
54 }
55
56 static struct notifier_block vdd_gpu_reboot_notifier = {
57         .notifier_call = vdd_gpu_reboot_notifier_event,
58 };
59
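/*
 * GPU power-domain notifier: track whether pd_gpu is powered and, while the
 * system is in early suspend, switch vdd_gpu off together with the domain
 * and back on when the domain is re-prepared.
 */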
60 static int clk_pd_gpu_notifier_call(struct notifier_block *nb,
61         unsigned long event, void *ptr)
62 {
63         int ret;
64
65         switch (event) {
66         case RK_CLK_PD_PREPARE:
67                 mutex_lock(&switch_vdd_gpu_mutex);
68                 pd_gpu_off = 0;
69                 if (early_suspend) {
70                         if (!regulator_is_enabled(vdd_gpu_regulator))
71                                 ret = regulator_enable(vdd_gpu_regulator);
72                 }
73                 mutex_unlock(&switch_vdd_gpu_mutex);
74                 break;
75         case RK_CLK_PD_UNPREPARE:
76                 mutex_lock(&switch_vdd_gpu_mutex);
77                 pd_gpu_off = 1;
78                 if (early_suspend) {
79                         if (regulator_is_enabled(vdd_gpu_regulator))
80                                 ret = regulator_disable(vdd_gpu_regulator);
81                 }
82                 mutex_unlock(&switch_vdd_gpu_mutex);
83                 break;
84         default:
85                 break;
86         }
87
88         return NOTIFY_OK;
89 }
90
91 static struct notifier_block clk_pd_gpu_notifier = {
92         .notifier_call = clk_pd_gpu_notifier_call,
93 };
94
95
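/*
 * Framebuffer blank notifier: record the early-suspend state and, when the
 * GPU power domain is already off, gate vdd_gpu on screen powerdown and
 * restore it at unblank.
 */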
96 static int early_suspend_notifier_call(struct notifier_block *self,
97                                 unsigned long action, void *data)
98 {
99         struct fb_event *event = data;
100         int blank_mode = *((int *)event->data);
101         int ret;
102
103         mutex_lock(&switch_vdd_gpu_mutex);
104         if (action == FB_EARLY_EVENT_BLANK) {
105                 switch (blank_mode) {
106                 case FB_BLANK_UNBLANK:
107                         early_suspend = 0;
108                         if (pd_gpu_off) {
109                                 if (!regulator_is_enabled(vdd_gpu_regulator))
110                                         ret = regulator_enable(
111                                         vdd_gpu_regulator);
112                         }
113                         break;
114                 default:
115                         break;
116                 }
117         } else if (action == FB_EVENT_BLANK) {
118                 switch (blank_mode) {
119                 case FB_BLANK_POWERDOWN:
120                         early_suspend = 1;
121                         if (pd_gpu_off) {
122                                 if (regulator_is_enabled(vdd_gpu_regulator))
123                                         ret = regulator_disable(
124                                         vdd_gpu_regulator);
125                         }
126
127                         break;
128                 default:
129                         break;
130                 }
131         }
132         mutex_unlock(&switch_vdd_gpu_mutex);
133
134         return NOTIFY_OK;
135 }
136
137 static struct notifier_block early_suspend_notifier = {
138                 .notifier_call = early_suspend_notifier_call,
139 };
140
141 #define DVFS_REGULATOR_MODE_STANDBY     1
142 #define DVFS_REGULATOR_MODE_IDLE        2
143 #define DVFS_REGULATOR_MODE_NORMAL      3
144 #define DVFS_REGULATOR_MODE_FAST        4
145
146 static const char* dvfs_regu_mode_to_string(unsigned int mode)
147 {
148         switch (mode) {
149         case DVFS_REGULATOR_MODE_FAST:
150                 return "FAST";
151         case DVFS_REGULATOR_MODE_NORMAL:
152                 return "NORMAL";
153         case DVFS_REGULATOR_MODE_IDLE:
154                 return "IDLE";
155         case DVFS_REGULATOR_MODE_STANDBY:
156                 return "STANDBY";
157         default:
158                 return "UNKNOWN";
159         }
160 }
161
162 static int dvfs_regu_mode_convert(unsigned int mode)
163 {
164         switch (mode) {
165         case DVFS_REGULATOR_MODE_FAST:
166                 return REGULATOR_MODE_FAST;
167         case DVFS_REGULATOR_MODE_NORMAL:
168                 return REGULATOR_MODE_NORMAL;
169         case DVFS_REGULATOR_MODE_IDLE:
170                 return REGULATOR_MODE_IDLE;
171         case DVFS_REGULATOR_MODE_STANDBY:
172                 return REGULATOR_MODE_STANDBY;
173         default:
174                 return -EINVAL;
175         }
176 }
177
178 static int dvfs_regu_mode_deconvert(unsigned int mode)
179 {
180         switch (mode) {
181         case REGULATOR_MODE_FAST:
182                 return DVFS_REGULATOR_MODE_FAST;
183         case REGULATOR_MODE_NORMAL:
184                 return DVFS_REGULATOR_MODE_NORMAL;
185         case REGULATOR_MODE_IDLE:
186                 return DVFS_REGULATOR_MODE_IDLE;
187         case REGULATOR_MODE_STANDBY:
188                 return DVFS_REGULATOR_MODE_STANDBY;
189         default:
190                 return -EINVAL;
191         }
192 }
193
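/*
 * Parse the optional "regu-mode-table" DT property into a
 * cpufreq_frequency_table of <frequency, regulator mode> pairs. The last DT
 * entry must have frequency 0 and the returned table is terminated with
 * CPUFREQ_TABLE_END.
 */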
194 static struct cpufreq_frequency_table *of_get_regu_mode_table(struct device_node *dev_node)
195 {
196         struct cpufreq_frequency_table *regu_mode_table = NULL;
197         const struct property *prop;
198         const __be32 *val;
199         int nr, i;
200
201         prop = of_find_property(dev_node, "regu-mode-table", NULL);
202         if (!prop)
203                 return NULL;
204         if (!prop->value)
205                 return NULL;
206
207         nr = prop->length / sizeof(u32);
208         if (nr % 2) {
209                 pr_err("%s: Invalid freq list\n", __func__);
210                 return NULL;
211         }
212
213         regu_mode_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
214                              (nr/2+1), GFP_KERNEL);
215         if (!regu_mode_table) {
216                 pr_err("%s: could not allocate regu_mode_table!\n", __func__);
217                 return ERR_PTR(-ENOMEM);
218         }
219
220         val = prop->value;
221
222         for (i=0; i<nr/2; i++){
223                 regu_mode_table[i].frequency = be32_to_cpup(val++) * 1000;
224                 regu_mode_table[i].index = be32_to_cpup(val++);
225         }
226
227         if (regu_mode_table[i-1].frequency != 0) {
228                 pr_err("%s: Last freq of regu_mode_table is not 0!\n", __func__);
229                 kfree(regu_mode_table);
230                 return NULL;
231         }
232
233         regu_mode_table[i].index = 0;
234         regu_mode_table[i].frequency = CPUFREQ_TABLE_END;
235
236         return regu_mode_table;
237 }
238
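/*
 * Validate every mode in regu_mode_table against the voltage domain's
 * regulator: an unsupported mode drops the whole table, modes the regulator
 * rounds are written back in place.
 */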
239 static int dvfs_regu_mode_table_constrain(struct dvfs_node *clk_dvfs_node)
240 {
241         int i, ret;
242         int mode, convert_mode, valid_mode;
243
244         if (!clk_dvfs_node)
245                 return -EINVAL;
246
247         if (!clk_dvfs_node->regu_mode_table)
248                 return -EINVAL;
249
250         if (!clk_dvfs_node->vd)
251                 return -EINVAL;
252
253         if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
254                 return -EINVAL;
255
256         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
257                 mode = clk_dvfs_node->regu_mode_table[i].index;
258                 convert_mode = dvfs_regu_mode_convert(mode);
259
260                 ret = regulator_is_supported_mode(clk_dvfs_node->vd->regulator,
261                                                 &convert_mode);
262                 if (ret) {
263                         DVFS_ERR("%s: find mode=%d unsupported\n", __func__,
264                                 mode);
265                         kfree(clk_dvfs_node->regu_mode_table);
266                         clk_dvfs_node->regu_mode_table = NULL;
267                         return ret;
268                 }
269
270                 valid_mode = dvfs_regu_mode_deconvert(convert_mode);
271                 if (valid_mode != mode) {
272                         DVFS_ERR("%s: round mode=%d to valid mode=%d!\n",
273                                 __func__, mode, valid_mode);
274                         clk_dvfs_node->regu_mode_table[i].index = valid_mode;
275                 }
276
277         }
278
279         return 0;
280 }
281
282 static int clk_dvfs_node_get_regu_mode(struct dvfs_node *clk_dvfs_node,
283         unsigned long rate, unsigned int *mode)
284 {
285         int i;
286
287
288         if ((!clk_dvfs_node) || (!clk_dvfs_node->regu_mode_table))
289                 return -EINVAL;
290
291         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
292                 if (rate >= clk_dvfs_node->regu_mode_table[i].frequency) {
293                         *mode = clk_dvfs_node->regu_mode_table[i].index;
294                         return 0;
295                 }
296         }
297
298         return -EINVAL;
299 }
300
301 static int dvfs_pd_get_newmode_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
302 {
303         unsigned int mode_max = 0;
304
305
306         if (clk_dvfs_node->regu_mode_en && (clk_dvfs_node->regu_mode >= pd->regu_mode)) {
307                 return clk_dvfs_node->regu_mode;
308         }
309
310         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
311                 if (clk_dvfs_node->regu_mode_en)
312                         mode_max = max(mode_max, (clk_dvfs_node->regu_mode));
313         }
314
315         return mode_max;
316 }
317
318 static void dvfs_update_clk_pds_mode(struct dvfs_node *clk_dvfs_node)
319 {
320         struct pd_node *pd;
321
322         if (!clk_dvfs_node)
323                 return;
324
325         pd = clk_dvfs_node->pd;
326         if (!pd)
327                 return;
328
329         pd->regu_mode = dvfs_pd_get_newmode_byclk(pd, clk_dvfs_node);
330 }
331
332 static int dvfs_vd_get_newmode_bypd(struct vd_node *vd)
333 {
334         unsigned int mode_max_vd = 0;
335         struct pd_node *pd;
336
337         if (!vd)
338                 return -EINVAL;
339
340         list_for_each_entry(pd, &vd->pd_list, node) {
341                 mode_max_vd = max(mode_max_vd, pd->regu_mode);
342         }
343
344         return mode_max_vd;
345 }
346
347 static int dvfs_vd_get_newmode_byclk(struct dvfs_node *clk_dvfs_node)
348 {
349         if (!clk_dvfs_node)
350                 return -EINVAL;
351
352         dvfs_update_clk_pds_mode(clk_dvfs_node);
353
354         return  dvfs_vd_get_newmode_bypd(clk_dvfs_node->vd);
355 }
356
357 static int dvfs_regu_set_mode(struct vd_node *vd, unsigned int mode)
358 {
359         int convert_mode;
360         int ret = 0;
361
362
363         if (IS_ERR_OR_NULL(vd)) {
364                 DVFS_ERR("%s: vd_node error\n", __func__);
365                 return -EINVAL;
366         }
367
368         DVFS_DBG("%s: mode=%d(old=%d)\n", __func__, mode, vd->regu_mode);
369
370         convert_mode = dvfs_regu_mode_convert(mode);
371         if (convert_mode < 0) {
372                 DVFS_ERR("%s: mode %d convert error\n", __func__, mode);
373                 return convert_mode;
374         }
375
376         if (!IS_ERR_OR_NULL(vd->regulator)) {
377                 ret = dvfs_regulator_set_mode(vd->regulator, convert_mode);
378                 if (ret < 0) {
379                         DVFS_ERR("%s: %s set mode %d (was %d) error!\n", __func__,
380                                 vd->regulator_name, mode, vd->regu_mode);
381                         return -EAGAIN;
382                 }
383         } else {
384                 DVFS_ERR("%s: invalid regulator\n", __func__);
385                 return -EINVAL;
386         }
387
388         vd->regu_mode = mode;
389
390         return 0;
391 }
392
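/*
 * Pick the regulator mode required by this clock at @rate, then recompute
 * the highest mode needed across its power domain and voltage domain and
 * apply it to the regulator.
 */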
393 static int dvfs_regu_mode_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
394 {
395         int ret;
396         int mode;
397
398
399         if (!clk_dvfs_node)
400                 return -EINVAL;
401
402         if (!clk_dvfs_node->regu_mode_en)
403                 return 0;
404
405         ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, rate, &mode);
406         if (ret) {
407                 DVFS_ERR("%s: clk(%s) rate %luhz get mode fail\n",
408                         __func__, clk_dvfs_node->name, rate);
409                 return ret;
410         }
411         clk_dvfs_node->regu_mode = mode;
412
413         mode = dvfs_vd_get_newmode_byclk(clk_dvfs_node);
414         if (mode < 0)
415                 return mode;
416
417         ret = dvfs_regu_set_mode(clk_dvfs_node->vd, mode);
418
419         return ret;
420 }
421
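/*
 * Wait for the regulator output to ramp after a voltage increase. Use
 * regulator_set_voltage_time() when the regulator reports ramp times,
 * otherwise fall back to a rough (new_volt - old_volt) / 512 us estimate.
 */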
422 static void dvfs_volt_up_delay(struct vd_node *vd, int new_volt, int old_volt)
423 {
424         int u_time;
425         
426         if(new_volt <= old_volt)
427                 return;
428         if(vd->volt_time_flag > 0)      
429                 u_time = regulator_set_voltage_time(vd->regulator, old_volt, new_volt);
430         else
431                 u_time = -1;            
432         if (u_time < 0) { /* regulator does not report ramp time, use default estimate */
433                 DVFS_DBG("%s: vd %s does not support getting delay time, using default\n",
434                                 __func__, vd->name);
435                 u_time = ((new_volt) - (old_volt)) >> 9;
436         }
437         
438         DVFS_DBG("%s: vd %s volt %d to %d delay %d us\n", 
439                 __func__, vd->name, old_volt, new_volt, u_time);
440         
441         if (u_time >= 1000) {
442                 mdelay(u_time / 1000);
443                 udelay(u_time % 1000);
444                 DVFS_WARNING("%s: regulator set voltage delay is larger than 1ms, old is %d, new is %d\n",
445                         __func__, old_volt, new_volt);
446         } else if (u_time) {
447                 udelay(u_time);
448         }                       
449 }
450
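/*
 * Set the regulator voltage; if the call reports an error, wait 2ms and read
 * the voltage back - when it already matches the request, treat the set as
 * having taken effect.
 */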
451 static int dvfs_regulator_set_voltage_readback(struct regulator *regulator, int min_uV, int max_uV)
452 {
453         int ret = 0, read_back = 0;
454         
455         ret = dvfs_regulator_set_voltage(regulator, max_uV, max_uV);
456         if (ret < 0) {
457                 DVFS_ERR("%s: now read back to check voltage\n", __func__);
458
459                 /* read back to judge if it is already effect */
460                 mdelay(2);
461                 read_back = dvfs_regulator_get_voltage(regulator);
462                 if (read_back == max_uV) {
463                         DVFS_ERR("%s: set ERROR but already effected, volt=%d\n", __func__, read_back);
464                         ret = 0;
465                 } else {
466                         DVFS_ERR("%s: set ERROR AND NOT effected, volt=%d\n", __func__, read_back);
467                 }
468         }
469         
470         return ret;
471 }
472
473 static int dvfs_scale_volt_direct(struct vd_node *vd_clk, int volt_new)
474 {
475         int ret = 0;
476
477         if (IS_ERR_OR_NULL(vd_clk)) {
478                 DVFS_ERR("%s: vd_node error\n", __func__);
479                 return -EINVAL;
480         }
481
482         DVFS_DBG("%s: volt=%d(old=%d)\n", __func__, volt_new, vd_clk->cur_volt);
483
484         if (!IS_ERR_OR_NULL(vd_clk->regulator)) {
485                 ret = dvfs_regulator_set_voltage_readback(vd_clk->regulator, volt_new, volt_new);
486                 dvfs_volt_up_delay(vd_clk,volt_new, vd_clk->cur_volt);
487                 if (ret < 0) {
488                         vd_clk->volt_set_flag = DVFS_SET_VOLT_FAILURE;
489                         DVFS_ERR("%s: %s set voltage up err ret = %d, Vnew = %d(was %d)mV\n",
490                                         __func__, vd_clk->name, ret, volt_new, vd_clk->cur_volt);
491                         return -EAGAIN;
492                 }
493
494         } else {
495                 DVFS_ERR("%s: invalid regulator\n", __func__);
496                 return -EINVAL;
497         }
498
499         vd_clk->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
500         vd_clk->cur_volt = volt_new;
501
502         return 0;
503
504 }
505
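/*
 * After a failed voltage set, re-read the regulator and resync cur_volt so
 * that scaling can continue from the real output voltage.
 */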
506 static int dvfs_reset_volt(struct vd_node *dvfs_vd)
507 {
508         int flag_set_volt_correct = 0;
509         if (!IS_ERR_OR_NULL(dvfs_vd->regulator))
510                 flag_set_volt_correct = dvfs_regulator_get_voltage(dvfs_vd->regulator);
511         else {
512                 DVFS_ERR("%s: invalid regulator\n", __func__);
513                 return -EINVAL;
514         }
515         if (flag_set_volt_correct <= 0) {
516                 DVFS_ERR("%s (vd:%s): tried to reload volt but it failed again (%d), stop scaling\n",
517                                 __func__, dvfs_vd->name, flag_set_volt_correct);
518                 return -EAGAIN;
519         }
520         dvfs_vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
521         DVFS_WARNING("%s:vd(%s) try to reload volt = %d\n",
522                         __func__, dvfs_vd->name, flag_set_volt_correct);
523
524         /* Reset vd's voltage */
525         dvfs_vd->cur_volt = flag_set_volt_correct;
526
527         return dvfs_vd->cur_volt;
528 }
529
530
531 /* for the clk-enable path: read the voltage domain's regulator state */
532 static void clk_enable_dvfs_regulator_check(struct vd_node *vd)
533 {
534         vd->cur_volt = dvfs_regulator_get_voltage(vd->regulator);
535         if(vd->cur_volt <= 0){
536         if (vd->cur_volt <= 0)
537                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
538         else
539                 vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
540
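/*
 * Cache the regulator's selectable voltages (up to VD_VOL_LIST_CNT) in
 * vd->volt_list; the list is used later to round table voltages to values
 * the regulator can actually provide.
 */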
541 static void dvfs_get_vd_regulator_volt_list(struct vd_node *vd)
542 {
543         unsigned int i, selector = dvfs_regulator_count_voltages(vd->regulator);
544         int n = 0, sel_volt = 0;
545         
546         if(selector > VD_VOL_LIST_CNT)
547                 selector = VD_VOL_LIST_CNT;
548
549         for (i = 0; i < selector; i++) {
550                 sel_volt = dvfs_regulator_list_voltage(vd->regulator, i);
551                 if(sel_volt <= 0){      
552                         //DVFS_WARNING("%s: vd(%s) list volt selector=%u, but volt(%d) <=0\n",
553                         //      __func__, vd->name, i, sel_volt);
554                         continue;
555                 }
556                 vd->volt_list[n++] = sel_volt;  
557                 DVFS_DBG("%s: vd(%s) list volt selector=%u, n=%d, volt=%d\n", 
558                         __func__, vd->name, i, n, sel_volt);
559         }
560         
561         vd->n_voltages = n;
562 }
563
564 /* smallest listed voltage >= volt */
565 static int vd_regulator_round_volt_max(struct vd_node *vd, int volt)
566 {
567         int sel_volt;
568         int i;
569         
570         for (i = 0; i < vd->n_voltages; i++) {
571                 sel_volt = vd->volt_list[i];
572                 if(sel_volt <= 0){      
573                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
574                                 __func__, i);
575                         continue;
576                 }
577                 if(sel_volt >= volt)
578                         return sel_volt;        
579         }
580         return -EINVAL;
581 }
582
583 /* largest listed voltage <= volt */
584 static int vd_regulator_round_volt_min(struct vd_node *vd, int volt)
585 {
586         int sel_volt;
587         int i;
588         
589         for (i = 0; i < vd->n_voltages; i++) {
590                 sel_volt = vd->volt_list[i];
591                 if(sel_volt <= 0){      
592                         DVFS_WARNING("%s: selector=%u, but volt <=0\n", 
593                                 __func__, i);
594                         continue;
595                 }
596                 if(sel_volt > volt){
597                         if(i > 0)
598                                 return vd->volt_list[i-1];
599                         else
600                                 return -EINVAL;
601                 }       
602         }
603         
604         return -EINVAL;
605 }
606
607 /* round volt to a listed value: <= volt for VD_LIST_RELATION_L, otherwise >= volt */
608 static int vd_regulator_round_volt(struct vd_node *vd, int volt, int flags)
609 {
610         if(!vd->n_voltages)
611                 return -EINVAL;
612         if(flags == VD_LIST_RELATION_L)
613                 return vd_regulator_round_volt_min(vd, volt);
614         else
615                 return vd_regulator_round_volt_max(vd, volt);   
616 }
617
618 static void dvfs_table_round_volt(struct dvfs_node *clk_dvfs_node)
619 {
620         int i, test_volt;
621
622         if(!clk_dvfs_node->dvfs_table || !clk_dvfs_node->vd || 
623                 IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator))
624                 return;
625
626         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
627
628                 test_volt = vd_regulator_round_volt(clk_dvfs_node->vd, clk_dvfs_node->dvfs_table[i].index, VD_LIST_RELATION_H);
629                 if(test_volt <= 0)
630                 {       
631                         DVFS_WARNING("%s: clk(%s) round volt(%d) but list <=0\n",
632                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index);
633                         break;
634                 }
635                 DVFS_DBG("clk %s:round_volt %d to %d\n",
636                         clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].index, test_volt);
637                 
638                 clk_dvfs_node->dvfs_table[i].index=test_volt;           
639         }
640 }
641
642 static void dvfs_vd_get_regulator_volt_time_info(struct vd_node *vd)
643 {
644         if (vd->volt_time_flag <= 0) { /* check whether the regulator reports voltage ramp time */
645                 vd->volt_time_flag = dvfs_regulator_set_voltage_time(vd->regulator, vd->cur_volt, vd->cur_volt+200*1000);
646                 if (vd->volt_time_flag < 0) {
647                         DVFS_DBG("%s: vd %s does not support volt_time\n",
648                                 __func__, vd->name);
649                 }
650                 else {
651                         DVFS_DBG("%s: vd %s supports volt_time, ramping up 200mV needs a %d us delay\n",
652                                 __func__, vd->name, vd->volt_time_flag);
653                 }
654         }
655 }
656 #if 0
657 static void dvfs_vd_get_regulator_mode_info(struct vd_node *vd)
658 {
659         //REGULATOR_MODE_FAST
660         if(vd->mode_flag <= 0){// check regulator support get uping vol timer{
661                 vd->mode_flag = dvfs_regulator_get_mode(vd->regulator);
662                 if(vd->mode_flag==REGULATOR_MODE_FAST || vd->mode_flag==REGULATOR_MODE_NORMAL
663                         || vd->mode_flag == REGULATOR_MODE_IDLE || vd->mode_flag==REGULATOR_MODE_STANDBY){
664                         
665                         if(dvfs_regulator_set_mode(vd->regulator, vd->mode_flag) < 0){
666                                 vd->mode_flag = 0;// check again
667                         }
668                 }
669                 if(vd->mode_flag > 0){
670                         DVFS_DBG("%s,vd %s mode(now is %d) support\n",
671                                 __func__, vd->name, vd->mode_flag);
672                 }
673                 else{
674                         DVFS_DBG("%s,vd %s mode is not support now check\n",
675                                 __func__, vd->name);
676                 }
677         }
678 }
679 #endif
680
681 struct regulator *dvfs_get_regulator(char *regulator_name) 
682 {
683         struct vd_node *vd;
684
685         mutex_lock(&rk_dvfs_mutex);
686         list_for_each_entry(vd, &rk_dvfs_tree, node) {
687                 if (strcmp(regulator_name, vd->regulator_name) == 0) {
688                         mutex_unlock(&rk_dvfs_mutex);
689                         return vd->regulator;
690                 }
691         }
692         mutex_unlock(&rk_dvfs_mutex);
693         return NULL;
694 }
695
696 static int dvfs_get_rate_range(struct dvfs_node *clk_dvfs_node)
697 {
698         struct cpufreq_frequency_table *table;
699         int i = 0;
700
701         if (!clk_dvfs_node)
702                 return -EINVAL;
703
704         clk_dvfs_node->min_rate = 0;
705         clk_dvfs_node->max_rate = 0;
706
707         table = clk_dvfs_node->dvfs_table;
708         for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
709                 clk_dvfs_node->max_rate = table[i].frequency / 1000 * 1000 * 1000;
710                 if (i == 0)
711                         clk_dvfs_node->min_rate = table[i].frequency / 1000 * 1000 * 1000;
712         }
713
714         DVFS_DBG("%s: clk %s, limit rate [min, max] = [%u, %u]\n",
715                         __func__, clk_dvfs_node->name, clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
716
717         return 0;
718 }
719
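/*
 * Round every dvfs table frequency to a rate the clock can produce, rounding
 * up to a whole MHz; the low three digits of each table entry are preserved
 * as flags (used by the ddr table).
 */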
720 static void dvfs_table_round_clk_rate(struct dvfs_node  *clk_dvfs_node)
721 {
722         int i, rate, temp_rate, flags;
723         
724         if(!clk_dvfs_node || !clk_dvfs_node->dvfs_table || !clk_dvfs_node->clk)
725                 return;
726
727         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
728                 /* ddr encodes flags in the low three digits: table rate = real rate + flags */
729                 flags = clk_dvfs_node->dvfs_table[i].frequency%1000;
730                 rate = (clk_dvfs_node->dvfs_table[i].frequency/1000)*1000;
731                 temp_rate = __clk_round_rate(clk_dvfs_node->clk, rate*1000);
732                 if(temp_rate <= 0){     
733                         DVFS_WARNING("%s: clk(%s) rate %d round return %d\n",
734                                 __func__, clk_dvfs_node->name, clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
735                         continue;
736                 }
737                 
738                 /* Set rate unit as MHZ */
739                 if (temp_rate % MHz != 0)
740                         temp_rate = (temp_rate / MHz + 1) * MHz;
741
742                 temp_rate = (temp_rate / 1000) + flags;
743                 
744                 DVFS_DBG("clk %s round_clk_rate %d to %d\n",
745                         clk_dvfs_node->name,clk_dvfs_node->dvfs_table[i].frequency, temp_rate);
746                 
747                 clk_dvfs_node->dvfs_table[i].frequency = temp_rate;             
748         }
749 }
750
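/*
 * Look up the lowest dvfs table frequency >= rate_khz and return its
 * frequency/voltage pair through clk_fv; -EINVAL means the rate is above the
 * whole table.
 */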
751 static int clk_dvfs_node_get_ref_volt(struct dvfs_node *clk_dvfs_node, int rate_khz,
752                 struct cpufreq_frequency_table *clk_fv)
753 {
754         int i = 0;
755         
756         if (rate_khz == 0 || !clk_dvfs_node || !clk_dvfs_node->dvfs_table) {
757                 /* invalid rate or missing table: nothing to do */
758                 return -EINVAL;
759         }
760         clk_fv->frequency = rate_khz;
761         clk_fv->index = 0;
762
763         for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
764                 if (clk_dvfs_node->dvfs_table[i].frequency >= rate_khz) {
765                         clk_fv->frequency = clk_dvfs_node->dvfs_table[i].frequency;
766                         clk_fv->index = clk_dvfs_node->dvfs_table[i].index;
767                          //printk("%s,%s rate=%ukhz(vol=%d)\n",__func__,clk_dvfs_node->name,
768                          //clk_fv->frequency, clk_fv->index);
769                         return 0;
770                 }
771         }
772         clk_fv->frequency = 0;
773         clk_fv->index = 0;
774         //DVFS_DBG("%s get corresponding voltage error! out of bound\n", clk_dvfs_node->name);
775         return -EINVAL;
776 }
777
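/*
 * A power domain's voltage is the highest set_volt among its enabled clocks;
 * the voltage domain in turn takes the maximum over its power domains.
 */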
778 static int dvfs_pd_get_newvolt_byclk(struct pd_node *pd, struct dvfs_node *clk_dvfs_node)
779 {
780         int volt_max = 0;
781
782         if (clk_dvfs_node->enable_count && (clk_dvfs_node->set_volt >= pd->cur_volt)) {
783                 return clk_dvfs_node->set_volt;
784         }
785
786         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
787                 if (clk_dvfs_node->enable_count)
788                         volt_max = max(volt_max, clk_dvfs_node->set_volt);
789         }
790         return volt_max;
791 }
792
793 static void dvfs_update_clk_pds_volt(struct dvfs_node *clk_dvfs_node)
794 {
795         struct pd_node *pd;
796         
797         if (!clk_dvfs_node)
798                 return;
799         
800         pd = clk_dvfs_node->pd;
801         if (!pd)
802                 return;
803         
804         pd->cur_volt = dvfs_pd_get_newvolt_byclk(pd, clk_dvfs_node);
805 }
806
807 static int dvfs_vd_get_newvolt_bypd(struct vd_node *vd)
808 {
809         int volt_max_vd = 0;
810         struct pd_node *pd;
811
812         if (!vd)
813                 return -EINVAL;
814         
815         list_for_each_entry(pd, &vd->pd_list, node) {
816                 volt_max_vd = max(volt_max_vd, pd->cur_volt);
817         }
818
819         return volt_max_vd;
820 }
821
822 static int dvfs_vd_get_newvolt_byclk(struct dvfs_node *clk_dvfs_node)
823 {
824         if (!clk_dvfs_node)
825                 return -EINVAL;
826
827         dvfs_update_clk_pds_volt(clk_dvfs_node);
828         return  dvfs_vd_get_newvolt_bypd(clk_dvfs_node->vd);
829 }
830
831 #if 0
832 static void dvfs_temp_limit_work_func(struct work_struct *work)
833 {
834         unsigned long delay = HZ / 10; // 100ms
835         struct vd_node *vd;
836         struct pd_node *pd;
837         struct dvfs_node *clk_dvfs_node;
838
839         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
840
841         mutex_lock(&rk_dvfs_mutex);
842         list_for_each_entry(vd, &rk_dvfs_tree, node) {
843                 mutex_lock(&vd->mutex);
844                 list_for_each_entry(pd, &vd->pd_list, node) {
845                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
846                                 if (clk_dvfs_node->temp_limit_table) {
847                                         clk_dvfs_node->temp = rockchip_tsadc_get_temp(clk_dvfs_node->temp_channel);
848                                         clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
849                                 }
850                         }
851                 }
852                 mutex_unlock(&vd->mutex);
853         }
854         mutex_unlock(&rk_dvfs_mutex);
855 }
856 #endif
857
858 static struct cpufreq_frequency_table rk3288v0_arm_pvtm_table[] = {
859         {.frequency = 216000,  .index = 4006},
860         {.frequency = 408000,  .index = 6518},
861         {.frequency = 600000,  .index = 8345},
862         {.frequency = 816000,  .index = 11026},
863         {.frequency = 1008000,  .index = 12906},
864         {.frequency = 1200000,  .index = 15532},
865         {.frequency = 1416000,  .index = 18076},
866         {.frequency = 1608000,  .index = 21282},
867         {.frequency = CPUFREQ_TABLE_END, .index = 1},
868 };
869
870 static struct pvtm_info rk3288v0_arm_pvtm_info = {
871         .compatible = "rockchip,rk3288",
872         .pvtm_table = rk3288v0_arm_pvtm_table,
873         .channel = ARM_DVFS_CH,
874         .process_version = RK3288_PROCESS_V0,
875         .scan_rate_hz = 216000000,
876         .sample_time_us = 1000,
877         .volt_step_uv = 12500,
878         .delta_pvtm_by_volt = 400,
879         .delta_pvtm_by_temp = 14,
880         .volt_margin_uv = 25000,
881         .min_volt_uv = 850000,
882         .max_volt_uv = 1400000,
883 };
884
885 static struct cpufreq_frequency_table rk3288v1_arm_pvtm_table[] = {
886         {.frequency = 216000,  .index = 4710},
887         {.frequency = 408000,  .index = 7200},
888         {.frequency = 600000,  .index = 9192},
889         {.frequency = 816000,  .index = 12560},
890         {.frequency = 1008000,  .index = 14741},
891         {.frequency = 1200000,  .index = 16886},
892         {.frequency = 1416000,  .index = 20081},
893         {.frequency = 1608000,  .index = 24061},
894         {.frequency = CPUFREQ_TABLE_END, .index = 1},
895 };
896
897 static struct pvtm_info rk3288v1_arm_pvtm_info = {
898         .compatible = "rockchip,rk3288",
899         .pvtm_table = rk3288v1_arm_pvtm_table,
900         .channel = ARM_DVFS_CH,
901         .process_version = RK3288_PROCESS_V1,
902         .scan_rate_hz = 216000000,
903         .sample_time_us = 1000,
904         .volt_step_uv = 12500,
905         .delta_pvtm_by_volt = 450,
906         .delta_pvtm_by_temp = 7,
907         .volt_margin_uv = 25000,
908         .min_volt_uv = 850000,
909         .max_volt_uv = 1400000,
910 };
911
912 static struct cpufreq_frequency_table rk3288v2_arm_pvtm_table[] = {
913         {.frequency = 216000,  .index = 5369},
914         {.frequency = 408000,  .index = 6984},
915         {.frequency = 600000,  .index = 8771},
916         {.frequency = 816000,  .index = 11434},
917         {.frequency = 1008000,  .index = 14178},
918         {.frequency = 1200000,  .index = 16797},
919         {.frequency = 1416000,  .index = 20178},
920         {.frequency = 1608000,  .index = 23303},
921         {.frequency = CPUFREQ_TABLE_END, .index = 1},
922 };
923
924 static struct pvtm_info rk3288v2_arm_pvtm_info = {
925         .compatible = "rockchip,rk3288",
926         .pvtm_table = rk3288v2_arm_pvtm_table,
927         .channel = ARM_DVFS_CH,
928         .process_version = RK3288_PROCESS_V2,
929         .scan_rate_hz = 216000000,
930         .sample_time_us = 1000,
931         .volt_step_uv = 12500,
932         .delta_pvtm_by_volt = 430,
933         .delta_pvtm_by_temp = 12,
934         .volt_margin_uv = 25000,
935         .min_volt_uv = 900000,
936         .max_volt_uv = 1400000,
937 };
938
939 static struct pvtm_info *pvtm_info_table[] = {
940         &rk3288v0_arm_pvtm_info,
941         &rk3288v1_arm_pvtm_info,
942         &rk3288v2_arm_pvtm_info
943 };
944
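/*
 * Compute the PVTM value one table entry must reach (per-frequency minimum
 * plus temperature and voltage-margin compensation) and pick the lowest
 * cached voltage whose measured PVTM meets it.
 */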
945 static int pvtm_set_single_dvfs(struct dvfs_node *dvfs_node, u32 idx,
946                                 struct pvtm_info *info, int *pvtm_list,
947                                 u32 min_pvtm)
948 {
949         struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
950         struct cpufreq_frequency_table *pvtm_table = dvfs_node->pvtm_table;
951         int target_pvtm, pvtm_margin, volt_margin;
952         unsigned int n_voltages = dvfs_node->vd->n_voltages;
953         int *volt_list = dvfs_node->vd->volt_list;
954         int n, temp;
955
956         volt_margin = info->volt_margin_uv + pvtm_table[idx].index;
957         n = volt_margin/info->volt_step_uv;
958         if (volt_margin%info->volt_step_uv)
959                 n++;
960
961         pvtm_margin = n*info->delta_pvtm_by_volt;
962         temp = rockchip_tsadc_get_temp(1);
963         target_pvtm = min_pvtm+temp * info->delta_pvtm_by_temp + pvtm_margin;
964
965         DVFS_DBG("=====%s: temp:%d, freq:%d, target pvtm:%d=====\n",
966                  __func__, temp, dvfs_table[idx].frequency, target_pvtm);
967
968         for (n = 0; n < n_voltages; n++) {
969                 if (pvtm_list[n] >= target_pvtm) {
970                         dvfs_table[idx].index = volt_list[n];
971                         DVFS_DBG("freq[%d]=%d, volt=%d\n",
972                                  idx, dvfs_table[idx].frequency, volt_list[n]);
973
974                         return 0;
975                 }
976         }
977
978         return -EINVAL;
981 }
982
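/*
 * Measure PVTM at scan_rate_hz for every supported voltage within
 * [min_volt_uv, max_volt_uv], then derive the voltage of each dvfs table
 * frequency from the per-process PVTM reference table.
 */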
983 static void pvtm_set_dvfs_table(struct dvfs_node *dvfs_node)
984 {
985         struct cpufreq_frequency_table *dvfs_table = dvfs_node->dvfs_table;
986         struct pvtm_info *info = dvfs_node->pvtm_info;
987         struct regulator *regulator = dvfs_node->vd->regulator;
988         int i, j;
989         int ret = 0;
990         int pvtm_list[VD_VOL_LIST_CNT] = {0};
991         unsigned int n_voltages = dvfs_node->vd->n_voltages;
992         int *volt_list = dvfs_node->vd->volt_list;
993
994         if (!info)
995                 return;
996
997         clk_set_rate(dvfs_node->clk, info->scan_rate_hz);
998         DVFS_DBG("%s:%lu\n", __func__, clk_get_rate(dvfs_node->clk));
999
1000         for (i = 0; i < n_voltages; i++) {
1001                 if ((volt_list[i] >= info->min_volt_uv) &&
1002                     (volt_list[i] <= info->max_volt_uv)) {
1003                         regulator_set_voltage(regulator, volt_list[i],
1004                                               volt_list[i]);
1005                         pvtm_list[i] = pvtm_get_value(info->channel,
1006                                                       info->sample_time_us);
1007                 }
1008         }
1009
1010         for (i = 0; dvfs_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1011                 for (j = 0; info->pvtm_table[j].frequency !=
1012                      CPUFREQ_TABLE_END; j++)
1013                         if (info->pvtm_table[j].frequency >=
1014                             dvfs_table[i].frequency) {
1015                                 int min_pvtm = info->pvtm_table[j].index;
1016
1017                                 ret = pvtm_set_single_dvfs(dvfs_node,
1018                                                            i,
1019                                                            info,
1020                                                            pvtm_list,
1021                                                            min_pvtm);
1022                                 break;
1023                         }
1024
1025                 if (ret) {
1026                         DVFS_WARNING("freq: %d can not reach target pvtm\n",
1027                                      dvfs_table[i].frequency);
1028                         break;
1029                 }
1030
1031                 if (info->pvtm_table[j].frequency == CPUFREQ_TABLE_END) {
1032                         DVFS_WARNING("not support freq :%d, max freq is %d\n",
1033                                      dvfs_table[i].frequency,
1034                                      info->pvtm_table[j-1].frequency);
1035                         break;
1036                 }
1037         }
1038 }
1039
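/*
 * Fallback limiter for rk312x when no valid temperature reading is
 * available: estimate how many CPUs are busy from the idle-time delta and
 * apply the matching virtual temperature-limit table (skipped while in
 * performance status).
 */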
1040 static void dvfs_virt_temp_limit_work_func(struct dvfs_node *dvfs_node)
1041 {
1042         const struct cpufreq_frequency_table *limits_table = NULL;
1043         unsigned int new_temp_limit_rate = -1;
1044         unsigned int nr_cpus = num_online_cpus();
1045         static bool in_perf;
1046         int i;
1047
1048         if (!cpu_is_rk312x())
1049                 return;
1050
1051         if (rockchip_get_system_status() & SYS_STATUS_PERFORMANCE) {
1052                 in_perf = true;
1053         } else if (in_perf) {
1054                 in_perf = false;
1055         } else {
1056                 static u64 last_time_in_idle;
1057                 static u64 last_time_in_idle_timestamp;
1058                 u64 time_in_idle = 0, now;
1059                 u32 delta_idle;
1060                 u32 delta_time;
1061                 unsigned cpu, busy_cpus;
1062
1063                 for_each_online_cpu(cpu) {
1064                         time_in_idle += get_cpu_idle_time_us(cpu, &now);
1065                 }
1066                 delta_time = now - last_time_in_idle_timestamp;
1067                 delta_idle = time_in_idle - last_time_in_idle;
1068                 last_time_in_idle = time_in_idle;
1069                 last_time_in_idle_timestamp = now;
1070                 delta_idle += delta_time >> 4; /* +6.25% */
1071                 if (delta_idle > (nr_cpus - 1)
1072                     * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
1073                         busy_cpus = 1;
1074                 else if (delta_idle > (nr_cpus - 2) * delta_time)
1075                         busy_cpus = 2;
1076                 else if (delta_idle > (nr_cpus - 3) * delta_time)
1077                         busy_cpus = 3;
1078                 else
1079                         busy_cpus = 4;
1080
1081                 limits_table = dvfs_node->virt_temp_limit_table[busy_cpus-1];
1082                 DVFS_DBG("delta time %6u us idle %6u us %u cpus select table %d\n",
1083                          delta_time, delta_idle, nr_cpus, busy_cpus);
1084         }
1085
1086         if (limits_table) {
1087                 new_temp_limit_rate = limits_table[0].frequency;
1088                 for (i = 0; limits_table[i].frequency != CPUFREQ_TABLE_END; i++) {
1089                         if (dvfs_node->target_temp >=
1090                                 limits_table[i].index)
1091                                 new_temp_limit_rate = limits_table[i].frequency;
1092                 }
1093         }
1094
1095         if (dvfs_node->temp_limit_rate != new_temp_limit_rate) {
1096                 dvfs_node->temp_limit_rate = new_temp_limit_rate;
1097                 dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
1098                 DVFS_DBG("temp_limit_rate:%d\n",
1099                          (int)dvfs_node->temp_limit_rate);
1100         }
1101 }
1102
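/* Performance policy: clamp the rate directly from the per-temperature limit table. */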
1103 static void dvfs_temp_limit_performance(struct dvfs_node *dvfs_node, int temp)
1104 {
1105         int i;
1106
1107         dvfs_node->temp_limit_rate = dvfs_node->max_rate;
1108         for (i = 0; dvfs_node->per_temp_limit_table[i].frequency !=
1109                 CPUFREQ_TABLE_END; i++) {
1110                 if (temp > dvfs_node->per_temp_limit_table[i].index)
1111                         dvfs_node->temp_limit_rate =
1112                         dvfs_node->per_temp_limit_table[i].frequency;
1113         }
1114         dvfs_clk_set_rate(dvfs_node, dvfs_node->last_set_rate);
1115 }
1116
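/*
 * Normal policy: when the temperature overshoots the target and is still
 * rising, step the limit rate down by the table step (never below
 * min_temp_limit); when it drops below the target, step back up towards
 * max_rate.
 */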
1117 static void dvfs_temp_limit_normal(struct dvfs_node *dvfs_node, int temp)
1118 {
1119         int delta_temp = 0;
1120         unsigned long arm_rate_step = 0;
1121         int i;
1122
1123         if (temp > dvfs_node->target_temp) {
1124                 if (temp > dvfs_node->old_temp) {
1125                         delta_temp = temp - dvfs_node->target_temp;
1126                         for (i = 0;
1127                         dvfs_node->nor_temp_limit_table[i].frequency !=
1128                                 CPUFREQ_TABLE_END; i++) {
1129                                 if (delta_temp >
1130                                 dvfs_node->nor_temp_limit_table[i].index)
1131                                         arm_rate_step =
1132                                 dvfs_node->nor_temp_limit_table[i].frequency;
1133                         }
1134                         if (arm_rate_step &&
1135                             (dvfs_node->temp_limit_rate > arm_rate_step)) {
1136                                 dvfs_node->temp_limit_rate -= arm_rate_step;
1137                                 if (dvfs_node->temp_limit_rate <
1138                                         dvfs_node->min_temp_limit)
1139                                         dvfs_node->temp_limit_rate =
1140                                         dvfs_node->min_temp_limit;
1141                                 dvfs_clk_set_rate(dvfs_node,
1142                                                   dvfs_node->last_set_rate);
1143                         }
1144                 }
1145         } else {
1146                 if (dvfs_node->temp_limit_rate < dvfs_node->max_rate) {
1147                         delta_temp = dvfs_node->target_temp - temp;
1148                         for (i = 0;
1149                         dvfs_node->nor_temp_limit_table[i].frequency !=
1150                                 CPUFREQ_TABLE_END; i++) {
1151                                 if (delta_temp >
1152                                 dvfs_node->nor_temp_limit_table[i].index)
1153                                         arm_rate_step =
1154                                 dvfs_node->nor_temp_limit_table[i].frequency;
1155                         }
1156
1157                         if (arm_rate_step) {
1158                                 dvfs_node->temp_limit_rate += arm_rate_step;
1159                                 if (dvfs_node->temp_limit_rate >
1160                                         dvfs_node->max_rate)
1161                                         dvfs_node->temp_limit_rate =
1162                                         dvfs_node->max_rate;
1163                                 dvfs_clk_set_rate(dvfs_node,
1164                                                   dvfs_node->last_set_rate);
1165                         }
1166                 }
1167         }
1168 }
1169
1170 static void dvfs_temp_limit(struct dvfs_node *dvfs_node, int temp)
1171 {
1172         int delta_temp = 0;
1173
1174         /* debounce: ignore temperature changes of 1 degree or less */
1175         delta_temp = (dvfs_node->old_temp > temp) ? (dvfs_node->old_temp-temp) :
1176         (temp-dvfs_node->old_temp);
1177         if (delta_temp <= 1)
1178                 return;
1179
1180         if (ROCKCHIP_PM_POLICY_PERFORMANCE == rockchip_pm_get_policy()) {
1181                 if (!dvfs_node->per_temp_limit_table)
1182                         return;
1183                 dvfs_temp_limit_performance(dvfs_node, temp);
1184         } else if (ROCKCHIP_PM_POLICY_NORMAL == rockchip_pm_get_policy()){
1185                 if (!dvfs_node->nor_temp_limit_table)
1186                         return;
1187                 dvfs_temp_limit_normal(dvfs_node, temp);
1188         }
1189         dvfs_node->old_temp = temp;
1190         DVFS_DBG("cur temp: %d, temp_limit_core_rate: %lu\n",
1191                  temp, dvfs_node->temp_limit_rate);
1192 }
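/*
 * Periodic worker (every 100ms): read the CPU and GPU temperatures from the
 * TSADC and apply temperature limiting; fall back to the virtual limiter
 * when the CPU reading is invalid.
 */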
1193 static void dvfs_temp_limit_work_func(struct work_struct *work)
1194 {
1195         unsigned long delay = HZ/10;
1196         int temp = 0;
1197
1198         queue_delayed_work_on(0, dvfs_wq, to_delayed_work(work), delay);
1199
1200         if (clk_cpu_dvfs_node->temp_limit_enable == 1) {
1201                 temp = rockchip_tsadc_get_temp(1);
1202                 if (temp == INVALID_TEMP)
1203                         dvfs_virt_temp_limit_work_func(clk_cpu_dvfs_node);
1204                 else
1205                         dvfs_temp_limit(clk_cpu_dvfs_node, temp);
1206         }
1207         if (clk_gpu_dvfs_node->temp_limit_enable == 1) {
1208                 temp = rockchip_tsadc_get_temp(2);
1209                 if (temp != INVALID_TEMP)
1210                         dvfs_temp_limit(clk_gpu_dvfs_node, temp);
1211         }
1212 }
1213 static DECLARE_DELAYED_WORK(dvfs_temp_limit_work, dvfs_temp_limit_work_func);
1214
1215 int dvfs_clk_enable_limit(struct dvfs_node *clk_dvfs_node, unsigned int min_rate, unsigned int max_rate)
1216 {
1217         u32 rate = 0, ret = 0;
1218
1219         if (!clk_dvfs_node || (min_rate > max_rate))
1220                 return -EINVAL;
1221         
1222         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1223                 mutex_lock(&clk_dvfs_node->vd->mutex);
1224                 
1225                 /* To reset clk_dvfs_node->min_rate/max_rate */
1226                 dvfs_get_rate_range(clk_dvfs_node);
1227                 clk_dvfs_node->freq_limit_en = 1;
1228
1229                 if ((min_rate >= clk_dvfs_node->min_rate) && (min_rate <= clk_dvfs_node->max_rate)) {
1230                         clk_dvfs_node->min_rate = min_rate;
1231                 }
1232                 
1233                 if ((max_rate >= clk_dvfs_node->min_rate) && (max_rate <= clk_dvfs_node->max_rate)) {
1234                         clk_dvfs_node->max_rate = max_rate;
1235                 }
1236
1237                 if (clk_dvfs_node->last_set_rate == 0)
1238                         rate = __clk_get_rate(clk_dvfs_node->clk);
1239                 else
1240                         rate = clk_dvfs_node->last_set_rate;
1241                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1242
1243                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1244
1245         }
1246
1247         DVFS_DBG("%s:clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1248                  __func__, __clk_get_name(clk_dvfs_node->clk),
1249                  clk_dvfs_node->last_set_rate,
1250                  clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1251
1252         return 0;
1253 }
1254 EXPORT_SYMBOL(dvfs_clk_enable_limit);
1255
1256 int dvfs_clk_disable_limit(struct dvfs_node *clk_dvfs_node)
1257 {
1258         u32 ret = 0;
1259
1260         if (!clk_dvfs_node)
1261                 return -EINVAL;
1262         
1263         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target){
1264                 mutex_lock(&clk_dvfs_node->vd->mutex);
1265                 
1266                 /* To reset clk_dvfs_node->min_rate/max_rate */
1267                 dvfs_get_rate_range(clk_dvfs_node);
1268                 clk_dvfs_node->freq_limit_en = 0;
1269                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, clk_dvfs_node->last_set_rate);
1270
1271                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1272         }
1273
1274         DVFS_DBG("%s: clk(%s) last_set_rate=%lu; [min_rate, max_rate]=[%u, %u]\n",
1275                  __func__, __clk_get_name(clk_dvfs_node->clk),
1276                  clk_dvfs_node->last_set_rate,
1277                  clk_dvfs_node->min_rate, clk_dvfs_node->max_rate);
1278
1279         return 0;
1280 }
1281 EXPORT_SYMBOL(dvfs_clk_disable_limit);
1282
1283 void dvfs_disable_temp_limit(void) {
1284         if (clk_cpu_dvfs_node)
1285                 clk_cpu_dvfs_node->temp_limit_enable = 0;
1286         if (clk_gpu_dvfs_node)
1287                 clk_gpu_dvfs_node->temp_limit_enable = 0;
1288         cancel_delayed_work_sync(&dvfs_temp_limit_work);
1289 }
1290
1291 int dvfs_clk_get_limit(struct dvfs_node *clk_dvfs_node, unsigned int *min_rate, unsigned int *max_rate) 
1292 {
1293         int freq_limit_en;
1294
1295         if (!clk_dvfs_node)
1296                 return -EINVAL;
1297
1298         mutex_lock(&clk_dvfs_node->vd->mutex);
1299
1300         *min_rate = clk_dvfs_node->min_rate;
1301         *max_rate = clk_dvfs_node->max_rate;
1302         freq_limit_en = clk_dvfs_node->freq_limit_en;
1303
1304         mutex_unlock(&clk_dvfs_node->vd->mutex);
1305
1306         return freq_limit_en;
1307 }
1308 EXPORT_SYMBOL(dvfs_clk_get_limit);
1309
1310 int dvfs_clk_register_set_rate_callback(struct dvfs_node *clk_dvfs_node, clk_set_rate_callback clk_dvfs_target)
1311 {
1312         if (!clk_dvfs_node)
1313                 return -EINVAL;
1314                         
1315         mutex_lock(&clk_dvfs_node->vd->mutex);
1316         clk_dvfs_node->clk_dvfs_target = clk_dvfs_target;
1317         mutex_unlock(&clk_dvfs_node->vd->mutex);
1318
1319         return 0;
1320 }
1321 EXPORT_SYMBOL(dvfs_clk_register_set_rate_callback);
1322
1323 struct cpufreq_frequency_table *dvfs_get_freq_volt_table(struct dvfs_node *clk_dvfs_node) 
1324 {
1325         struct cpufreq_frequency_table *table;
1326
1327         if (!clk_dvfs_node)
1328                 return NULL;
1329
1330         mutex_lock(&clk_dvfs_node->vd->mutex);
1331         table = clk_dvfs_node->dvfs_table;
1332         mutex_unlock(&clk_dvfs_node->vd->mutex);
1333         
1334         return table;
1335 }
1336 EXPORT_SYMBOL(dvfs_get_freq_volt_table);
1337
1338 int dvfs_set_freq_volt_table(struct dvfs_node *clk_dvfs_node, struct cpufreq_frequency_table *table)
1339 {
1340         if (!clk_dvfs_node)
1341                 return -EINVAL;
1342
1343         if (IS_ERR_OR_NULL(table)){
1344                 DVFS_ERR("%s:invalid table!\n", __func__);
1345                 return -EINVAL;
1346         }
1347         
1348         mutex_lock(&clk_dvfs_node->vd->mutex);
1349         clk_dvfs_node->dvfs_table = table;
1350         dvfs_get_rate_range(clk_dvfs_node);
1351         dvfs_table_round_clk_rate(clk_dvfs_node);
1352         dvfs_table_round_volt(clk_dvfs_node);
1353         mutex_unlock(&clk_dvfs_node->vd->mutex);
1354
1355         return 0;
1356 }
1357 EXPORT_SYMBOL(dvfs_set_freq_volt_table);
1358
1359 static int get_adjust_volt_by_leakage(struct dvfs_node *dvfs_node)
1360 {
1361         int leakage = 0;
1362         int delta_leakage = 0;
1363         int i = 0;
1364         int adjust_volt = 0;
1365
1366         if (!dvfs_node->vd)
1367                 return 0;
1368
1369         if (dvfs_node->lkg_info.def_table_lkg == -1)
1370                 return 0;
1371
1372         leakage = rockchip_get_leakage(dvfs_node->channel);
1373         if (!leakage || (leakage == 0xff))
1374                 return 0;
1375
1376         delta_leakage = leakage - dvfs_node->lkg_info.def_table_lkg;
1377         if (delta_leakage <= 0) {
1378                 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1379                         CPUFREQ_TABLE_END); i++) {
1380                         if (leakage > dvfs_node->lkg_info.table[i].lkg) {
1381                                 adjust_volt =
1382                                         dvfs_node->lkg_info.table[i].dlt_volt;
1383                         } else {
1384                                 return adjust_volt;
1385                         }
1386                 }
1387         } else if (delta_leakage > 0) {
1388                 for (i = 0; (dvfs_node->lkg_info.table[i].dlt_volt !=
1389                         CPUFREQ_TABLE_END); i++) {
1390                         if (leakage <= dvfs_node->lkg_info.table[i].lkg) {
1391                                 adjust_volt =
1392                                         -dvfs_node->lkg_info.table[i].dlt_volt;
1393                                 return adjust_volt;
1394                         }
1395                 }
1396         }
1397         return adjust_volt;
1398 }
1399
1400 static void adjust_table_by_leakage(struct dvfs_node *dvfs_node)
1401 {
1402         int i, adjust_volt = get_adjust_volt_by_leakage(dvfs_node);
1403
1404         if (!adjust_volt)
1405                 return;
1406
1407         if (!dvfs_node->dvfs_table)
1408                 return;
1409
1410         if (dvfs_node->lkg_info.min_adjust_freq == -1)
1411                 return;
1412
1413         for (i = 0;
1414         (dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
1415                 if (dvfs_node->dvfs_table[i].frequency >=
1416                         dvfs_node->lkg_info.min_adjust_freq)
1417                         dvfs_node->dvfs_table[i].index += adjust_volt;
1418         }
1419 }
1420
1421 int clk_enable_dvfs(struct dvfs_node *clk_dvfs_node)
1422 {
1423         struct cpufreq_frequency_table clk_fv;
1424         int volt_new;
1425         unsigned int mode;
1426         int ret;
1427
1428         if (!clk_dvfs_node)
1429                 return -EINVAL;
1430         
1431         DVFS_DBG("%s: dvfs clk(%s) enable dvfs!\n", 
1432                 __func__, __clk_get_name(clk_dvfs_node->clk));
1433
1434         if (!clk_dvfs_node->vd) {
1435                 DVFS_ERR("%s: dvfs node(%s) has no vd node!\n", 
1436                         __func__, clk_dvfs_node->name);
1437                 return -EINVAL;
1438         }
1439         mutex_lock(&clk_dvfs_node->vd->mutex);
1440         if (clk_dvfs_node->enable_count == 0) {
1441                 if (IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1442                         if (clk_dvfs_node->vd->regulator_name)
1443                                 clk_dvfs_node->vd->regulator = dvfs_regulator_get(NULL, clk_dvfs_node->vd->regulator_name);
1444                         if (!IS_ERR_OR_NULL(clk_dvfs_node->vd->regulator)) {
1445                                 DVFS_DBG("%s: vd(%s) get regulator(%s) ok\n",
1446                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1447                                 clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1448                                 dvfs_get_vd_regulator_volt_list(clk_dvfs_node->vd);
1449                                 dvfs_vd_get_regulator_volt_time_info(clk_dvfs_node->vd);
1450                         } else {
1451                                 clk_dvfs_node->enable_count = 0;
1452                                 DVFS_ERR("%s: vd(%s) can't get regulator(%s)!\n", 
1453                                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->regulator_name);
1454                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1455                                 return -ENXIO;
1456                         }
1457                 } else {
1458                         clk_enable_dvfs_regulator_check(clk_dvfs_node->vd);
1459                 }
1460                 
1461                 DVFS_DBG("%s: vd(%s) cur volt=%d\n",
1462                         __func__, clk_dvfs_node->vd->name, clk_dvfs_node->vd->cur_volt);
1463
1464                 dvfs_table_round_clk_rate(clk_dvfs_node);
1465                 dvfs_get_rate_range(clk_dvfs_node);
1466                 clk_dvfs_node->freq_limit_en = 1;
1467                 if (clk_dvfs_node->lkg_adjust_volt_en)
1468                         adjust_table_by_leakage(clk_dvfs_node);
1469                 if (clk_dvfs_node->support_pvtm)
1470                         pvtm_set_dvfs_table(clk_dvfs_node);
1471                 dvfs_table_round_volt(clk_dvfs_node);
1472                 clk_dvfs_node->set_freq = clk_dvfs_node_get_rate_kz(clk_dvfs_node->clk);
1473                 clk_dvfs_node->last_set_rate = clk_dvfs_node->set_freq*1000;
1474                 
1475                 DVFS_DBG("%s: %s get freq %u!\n", 
1476                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1477
1478                 if (clk_dvfs_node_get_ref_volt(clk_dvfs_node, clk_dvfs_node->set_freq, &clk_fv)) {
1479                         if (clk_dvfs_node->dvfs_table[0].frequency == CPUFREQ_TABLE_END) {
1480                                 DVFS_ERR("%s: table empty\n", __func__);
1481                                 clk_dvfs_node->enable_count = 0;
1482                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1483                                 return -EINVAL;
1484                         } else {
1485                                 DVFS_WARNING("%s: clk(%s) frequency table values are all smaller than the default rate(%u), keep default and just enable dvfs\n",
1486                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1487                                 clk_dvfs_node->enable_count++;
1488                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1489                                 return 0;
1490                         }
1491                 }
1492                 clk_dvfs_node->enable_count++;
1493                 clk_dvfs_node->set_volt = clk_fv.index;
1494                 volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1495                 DVFS_DBG("%s: %s, freq %u(ref vol %u)\n",
1496                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt);
1497 #if 0
1498                 if (clk_dvfs_node->dvfs_nb) {
1499                         /* must be unregistered when the clk is disabled */
1500                         clk_notifier_register(clk, clk_dvfs_node->dvfs_nb);
1501                 }
1502 #endif
1503                 if (clk_dvfs_node->vd->cur_volt != volt_new) {
1504                         ret = dvfs_regulator_set_voltage_readback(clk_dvfs_node->vd->regulator, volt_new, volt_new);
1505                         dvfs_volt_up_delay(clk_dvfs_node->vd, volt_new, clk_dvfs_node->vd->cur_volt);
1506                         if (ret < 0) {
1507                                 clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
1508                                 clk_dvfs_node->enable_count = 0;
1509                                 DVFS_ERR("dvfs enable clk %s, set volt error\n", clk_dvfs_node->name);
1510                                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1511                                 return -EAGAIN;
1512                         }
1513                         clk_dvfs_node->vd->cur_volt = volt_new;
1514                         clk_dvfs_node->vd->volt_set_flag = DVFS_SET_VOLT_SUCCESS;
1515                 }
1516
1517         } else {
1518                 DVFS_DBG("%s: dvfs already enabled, enable_count = %d!\n",
1519                         __func__, clk_dvfs_node->enable_count);
1520                 clk_dvfs_node->enable_count++;
1521         }
1522
1523         if (clk_dvfs_node->regu_mode_en) {
1524                 ret = dvfs_regu_mode_table_constrain(clk_dvfs_node);
1525                 if (ret) {
1526                         DVFS_ERR("%s: clk(%s) regu_mode_table is invalid, set regu_mode_en=0!\n",
1527                                         __func__, clk_dvfs_node->name);
1528                         clk_dvfs_node->regu_mode_en = 0;
1529                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1530                         return ret;
1531                 }
1532
1533                 ret = clk_dvfs_node_get_regu_mode(clk_dvfs_node, clk_dvfs_node->set_freq*1000, &mode);
1534                 if (ret < 0) {
1535                         DVFS_ERR("%s: clk(%s) rate %uKHz get regu_mode fail\n",
1536                                         __func__, clk_dvfs_node->name, clk_dvfs_node->set_freq);
1537                         mutex_unlock(&clk_dvfs_node->vd->mutex);
1538                         return ret;
1539                 } else
1540                         clk_dvfs_node->regu_mode = mode;
1541
1542                 dvfs_update_clk_pds_mode(clk_dvfs_node);
1543         }
1544
1545         mutex_unlock(&clk_dvfs_node->vd->mutex);
1546         
1547         return 0;
1548 }
1549 EXPORT_SYMBOL(clk_enable_dvfs);
1550
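/*
 * clk_disable_dvfs() - drop one enable reference.  When the count reaches
 * zero the domain voltage is re-evaluated from the remaining enabled clocks
 * and scaled directly; the clock rate itself is left untouched.
 */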
1551 int clk_disable_dvfs(struct dvfs_node *clk_dvfs_node)
1552 {
1553         int volt_new;
1554
1555         if (!clk_dvfs_node)
1556                 return -EINVAL;
1557
1558         DVFS_DBG("%s:dvfs clk(%s) disable dvfs!\n", 
1559                 __func__, __clk_get_name(clk_dvfs_node->clk));
1560
1561         mutex_lock(&clk_dvfs_node->vd->mutex);
1562         if (!clk_dvfs_node->enable_count) {
1563                 DVFS_WARNING("%s:clk(%s) is already closed!\n", 
1564                         __func__, __clk_get_name(clk_dvfs_node->clk));
1565                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1566                 return 0;
1567         } else {
1568                 clk_dvfs_node->enable_count--;
1569                 if (0 == clk_dvfs_node->enable_count) {
1570                         DVFS_DBG("%s:dvfs clk(%s) disable dvfs ok!\n",
1571                                 __func__, __clk_get_name(clk_dvfs_node->clk));
1572                         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1573                         dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1574
1575 #if 0
1576                         clk_notifier_unregister(clk, clk_dvfs_node->dvfs_nb);
1577                         DVFS_DBG("clk unregister nb!\n");
1578 #endif
1579                 }
1580         }
1581         mutex_unlock(&clk_dvfs_node->vd->mutex);
1582         return 0;
1583 }
1584 EXPORT_SYMBOL(clk_disable_dvfs);
1585
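/*
 * dvfs_get_limit_rate() - when freq_limit_en is set, clamp the requested rate
 * to the node's [min_rate, max_rate] range and, with temperature limiting
 * enabled, to the current temp_limit_rate; otherwise the rate passes through
 * unchanged.
 */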
1586 static unsigned long dvfs_get_limit_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1587 {
1588         unsigned long limit_rate;
1589
1590         limit_rate = rate;
1591         if (clk_dvfs_node->freq_limit_en) {
1592                 /* clamp to the dvfs table's [min_rate, max_rate] range */
1593                 if (rate < clk_dvfs_node->min_rate) {
1594                         limit_rate = clk_dvfs_node->min_rate;
1595                 } else if (rate > clk_dvfs_node->max_rate) {
1596                         limit_rate = clk_dvfs_node->max_rate;
1597                 }
1598                 if (clk_dvfs_node->temp_limit_enable) {
1599                         if (limit_rate > clk_dvfs_node->temp_limit_rate) {
1600                                 limit_rate = clk_dvfs_node->temp_limit_rate;
1601                         }
1602                 }
1603         }
1604
1605         DVFS_DBG("%s: rate:%lu, limit_rate:%lu\n", __func__, rate, limit_rate);
1606
1607         return limit_rate;
1608 }
1609
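/*
 * dvfs_target() - vd_dvfs_target callback used for rate changes, called with
 * the voltage-domain mutex held.
 *
 * Standard DVFS ordering: when raising the rate the regulator mode and domain
 * voltage are raised first and the clock is reprogrammed afterwards; when
 * lowering the rate the clock is reprogrammed first and the voltage/mode are
 * dropped afterwards.  A failed voltage step restores set_volt and returns
 * the error.
 */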
1610 static int dvfs_target(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1611 {
1612         struct cpufreq_frequency_table clk_fv;
1613         unsigned long old_rate = 0, new_rate = 0, volt_new = 0, clk_volt_store = 0;
1614         struct clk *clk = clk_dvfs_node->clk;
1615         int ret;
1616
1617         if (!clk)
1618                 return -EINVAL;
1619
1620         if (!clk_dvfs_node->enable_count)
1621                 return 0;
1622         
1623         if (clk_dvfs_node->vd->volt_set_flag == DVFS_SET_VOLT_FAILURE) {
1624                 /* the previous voltage setting failed, restore a sane voltage first */
1625                 ret = dvfs_reset_volt(clk_dvfs_node->vd);
1626                 if (ret < 0) {
1627                         return -EAGAIN;
1628                 }
1629         }
1630
1631         rate = dvfs_get_limit_rate(clk_dvfs_node, rate);
1632         new_rate = __clk_round_rate(clk, rate);
1633         old_rate = __clk_get_rate(clk);
1634         if (new_rate == old_rate)
1635                 return 0;
1636
1637         DVFS_DBG("enter %s: clk(%s) new_rate = %lu Hz, old_rate = %lu Hz\n",
1638                 __func__, clk_dvfs_node->name, new_rate, old_rate);
1639
1640         /* find the clk corresponding voltage */
1641         ret = clk_dvfs_node_get_ref_volt(clk_dvfs_node, new_rate / 1000, &clk_fv);
1642         if (ret) {
1643                 DVFS_ERR("%s: dvfs clk(%s) rate %lu Hz is not supported\n",
1644                         __func__, clk_dvfs_node->name, new_rate);
1645                 return ret;
1646         }
1647         clk_volt_store = clk_dvfs_node->set_volt;
1648         clk_dvfs_node->set_volt = clk_fv.index;
1649         volt_new = dvfs_vd_get_newvolt_byclk(clk_dvfs_node);
1650         DVFS_DBG("%s: %s new rate=%lu (was %lu), new volt=%lu (was %d)\n",
1651                 __func__, clk_dvfs_node->name, new_rate, old_rate, volt_new, clk_dvfs_node->vd->cur_volt);
1652
1653
1654         /* if up the rate */
1655         if (new_rate > old_rate) {
1656                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1657                 if (ret)
1658                         DVFS_ERR("%s: dvfs clk(%s) rate %lu Hz set mode err\n",
1659                                 __func__, clk_dvfs_node->name, new_rate);
1660
1661                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1662                 if (ret)
1663                         goto fail_roll_back;
1664         }
1665
1666         /* scale rate */
1667         if (clk_dvfs_node->clk_dvfs_target) {
1668                 ret = clk_dvfs_node->clk_dvfs_target(clk, rate);
1669         } else {
1670                 ret = clk_set_rate(clk, rate);
1671         }
1672
1673         if (ret) {
1674                 DVFS_ERR("%s:clk(%s) set rate err\n", 
1675                         __func__, __clk_get_name(clk));
1676                 goto fail_roll_back;
1677         }
1678         clk_dvfs_node->set_freq = new_rate / 1000;
1679
1680         DVFS_DBG("%s:dvfs clk(%s) set rate %lu ok\n", 
1681                 __func__, clk_dvfs_node->name, __clk_get_rate(clk));
1682
1683         /* if down the rate */
1684         if (new_rate < old_rate) {
1685                 ret = dvfs_scale_volt_direct(clk_dvfs_node->vd, volt_new);
1686                 if (ret)
1687                         goto out;
1688
1689                 ret = dvfs_regu_mode_target(clk_dvfs_node, new_rate);
1690                 if (ret)
1691                         DVFS_ERR("%s: dvfs clk(%s) rate %lu Hz set mode err\n",
1692                                 __func__, clk_dvfs_node->name, new_rate);
1693         }
1694
1695         return 0;
1696 fail_roll_back:
1697         clk_dvfs_node->set_volt = clk_volt_store;
1698 out:
1699         return ret;
1700 }
1701
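/*
 * Thin wrappers around the common clk API on the node's clk handle.
 * dvfs_clk_get_last_set_rate() returns the most recently requested rate,
 * recorded under the voltage-domain mutex.
 */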
1702 unsigned long dvfs_clk_round_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1703 {
1704         return __clk_round_rate(clk_dvfs_node->clk, rate);
1705 }
1706 EXPORT_SYMBOL_GPL(dvfs_clk_round_rate);
1707
1708 unsigned long dvfs_clk_get_rate(struct dvfs_node *clk_dvfs_node)
1709 {
1710         return __clk_get_rate(clk_dvfs_node->clk);
1711 }
1712 EXPORT_SYMBOL_GPL(dvfs_clk_get_rate);
1713
1714 unsigned long dvfs_clk_get_last_set_rate(struct dvfs_node *clk_dvfs_node)
1715 {
1716         unsigned long last_set_rate;
1717
1718         mutex_lock(&clk_dvfs_node->vd->mutex);
1719         last_set_rate = clk_dvfs_node->last_set_rate;
1720         mutex_unlock(&clk_dvfs_node->vd->mutex);
1721
1722         return last_set_rate;
1723 }
1724 EXPORT_SYMBOL_GPL(dvfs_clk_get_last_set_rate);
1725
1726
1727 int dvfs_clk_enable(struct dvfs_node *clk_dvfs_node)
1728 {
1729         return clk_enable(clk_dvfs_node->clk);
1730 }
1731 EXPORT_SYMBOL_GPL(dvfs_clk_enable);
1732
1733 void dvfs_clk_disable(struct dvfs_node *clk_dvfs_node)
1734 {
1735         return clk_disable(clk_dvfs_node->clk);
1736 }
1737 EXPORT_SYMBOL_GPL(dvfs_clk_disable);
1738
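/*
 * clk_get_dvfs_node() - walk the vd -> pd -> clk tree and return the node
 * registered under @clk_name, or NULL when no such node exists.
 */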
1739 struct dvfs_node *clk_get_dvfs_node(char *clk_name)
1740 {
1741         struct vd_node *vd;
1742         struct pd_node *pd;
1743         struct dvfs_node *clk_dvfs_node;
1744
1745         mutex_lock(&rk_dvfs_mutex);
1746         list_for_each_entry(vd, &rk_dvfs_tree, node) {
1747                 mutex_lock(&vd->mutex);
1748                 list_for_each_entry(pd, &vd->pd_list, node) {
1749                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
1750                                 if (0 == strcmp(clk_dvfs_node->name, clk_name)) {
1751                                         mutex_unlock(&vd->mutex);
1752                                         mutex_unlock(&rk_dvfs_mutex);
1753                                         return clk_dvfs_node;
1754                                 }
1755                         }
1756                 }
1757                 mutex_unlock(&vd->mutex);
1758         }
1759         mutex_unlock(&rk_dvfs_mutex);
1760         
1761         return NULL;    
1762 }
1763 EXPORT_SYMBOL_GPL(clk_get_dvfs_node);
1764
1765 void clk_put_dvfs_node(struct dvfs_node *clk_dvfs_node)
1766 {
1767         return;
1768 }
1769 EXPORT_SYMBOL_GPL(clk_put_dvfs_node);
1770
1771 int dvfs_clk_prepare_enable(struct dvfs_node *clk_dvfs_node)
1772 {
1773         return clk_prepare_enable(clk_dvfs_node->clk);
1774 }
1775 EXPORT_SYMBOL_GPL(dvfs_clk_prepare_enable);
1776
1777
1778 void dvfs_clk_disable_unprepare(struct dvfs_node *clk_dvfs_node)
1779 {
1780         clk_disable_unprepare(clk_dvfs_node->clk);
1781 }
1782 EXPORT_SYMBOL_GPL(dvfs_clk_disable_unprepare);
1783
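/*
 * dvfs_clk_set_rate() - request a new rate through the voltage domain's
 * vd_dvfs_target callback (dvfs_target() for nodes registered by
 * of_dvfs_init()), serialized by the domain mutex.  The requested rate is
 * recorded in last_set_rate even if frequency limiting changes the rate that
 * is actually programmed.
 */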
1784 int dvfs_clk_set_rate(struct dvfs_node *clk_dvfs_node, unsigned long rate)
1785 {
1786         int ret = -EINVAL;
1787         
1788         if (!clk_dvfs_node)
1789                 return -EINVAL;
1790         
1791         DVFS_DBG("%s:dvfs node(%s) set rate(%lu)\n", 
1792                 __func__, clk_dvfs_node->name, rate);
1793         
1794         #if 0 /* optional capability check taken from the reference RK implementation */
1795         if (dvfs_support_clk_set_rate(dvfs_info)==false) {
1796                 DVFS_ERR("dvfs func:%s is not support!\n", __func__);
1797                 return ret;
1798         }
1799         #endif
1800
1801         if (clk_dvfs_node->vd && clk_dvfs_node->vd->vd_dvfs_target) {
1802                 mutex_lock(&clk_dvfs_node->vd->mutex);
1803                 ret = clk_dvfs_node->vd->vd_dvfs_target(clk_dvfs_node, rate);
1804                 clk_dvfs_node->last_set_rate = rate;
1805                 mutex_unlock(&clk_dvfs_node->vd->mutex);
1806         } else {
1807                 DVFS_ERR("%s:dvfs node(%s) has no vd node or target callback!\n", 
1808                         __func__, clk_dvfs_node->name);
1809         }
1810                 
1811         return ret;     
1812 }
1813 EXPORT_SYMBOL_GPL(dvfs_clk_set_rate);
1814
1815
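/*
 * rk_regist_vd()/rk_regist_pd()/rk_regist_clk() - hook a voltage domain,
 * power domain or clock node into the rk_dvfs_tree hierarchy.  Callers are
 * expected to have filled in the parent pointers (pd->vd, dvfs_node->vd and
 * dvfs_node->pd) beforehand.
 */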
1816 int rk_regist_vd(struct vd_node *vd)
1817 {
1818         if (!vd)
1819                 return -EINVAL;
1820
1821         vd->mode_flag = 0;
1822         vd->volt_time_flag = 0;
1823         vd->n_voltages = 0;
1824         INIT_LIST_HEAD(&vd->pd_list);
1825         mutex_lock(&rk_dvfs_mutex);
1826         list_add(&vd->node, &rk_dvfs_tree);
1827         mutex_unlock(&rk_dvfs_mutex);
1828
1829         return 0;
1830 }
1831 EXPORT_SYMBOL_GPL(rk_regist_vd);
1832
1833 int rk_regist_pd(struct pd_node *pd)
1834 {
1835         struct vd_node  *vd;
1836
1837         if (!pd)
1838                 return -EINVAL;
1839
1840         vd = pd->vd;
1841         if (!vd)
1842                 return -EINVAL;
1843
1844         INIT_LIST_HEAD(&pd->clk_list);
1845         mutex_lock(&vd->mutex);
1846         list_add(&pd->node, &vd->pd_list);
1847         mutex_unlock(&vd->mutex);
1848         
1849         return 0;
1850 }
1851 EXPORT_SYMBOL_GPL(rk_regist_pd);
1852
1853 int rk_regist_clk(struct dvfs_node *clk_dvfs_node)
1854 {
1855         struct vd_node  *vd;
1856         struct pd_node  *pd;
1857
1858         if (!clk_dvfs_node)
1859                 return -EINVAL;
1860
1861         vd = clk_dvfs_node->vd;
1862         pd = clk_dvfs_node->pd;
1863         if (!vd || !pd)
1864                 return -EINVAL;
1865
1866         mutex_lock(&vd->mutex);
1867         list_add(&clk_dvfs_node->node, &pd->clk_list);
1868         mutex_unlock(&vd->mutex);
1869         
1870         return 0;
1871 }
1872 EXPORT_SYMBOL_GPL(rk_regist_clk);
1873
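/*
 * of_get_temp_limit_table() - parse a DT property of two-cell entries into a
 * CPUFREQ_TABLE_END-terminated cpufreq_frequency_table.  The first cell of
 * each entry is stored in .index (a temperature threshold for the
 * *-temp-limit users) and the second in .frequency, scaled by 1000
 * (presumably MHz in the DT, kHz in the table).  The caller owns the
 * returned allocation.
 */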
1874 static struct cpufreq_frequency_table *of_get_temp_limit_table(struct device_node *dev_node, const char *propname)
1875 {
1876         struct cpufreq_frequency_table *temp_limit_table = NULL;
1877         const struct property *prop;
1878         const __be32 *val;
1879         int nr, i;
1880
1881         prop = of_find_property(dev_node, propname, NULL);
1882         if (!prop)
1883                 return NULL;
1884         if (!prop->value)
1885                 return NULL;
1886
1887         nr = prop->length / sizeof(u32);
1888         if (nr % 2) {
1889                 pr_err("%s: Invalid freq list\n", __func__);
1890                 return NULL;
1891         }
1892
1893         temp_limit_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
1894                              (nr/2 + 1), GFP_KERNEL);
             if (!temp_limit_table)
                     return NULL;
1895
1896         val = prop->value;
1897
1898         for (i = 0; i < nr/2; i++) {
1899                 temp_limit_table[i].index = be32_to_cpup(val++);
1900                 temp_limit_table[i].frequency = be32_to_cpup(val++) * 1000;
1901         }
1902
1903         temp_limit_table[i].index = 0;
1904         temp_limit_table[i].frequency = CPUFREQ_TABLE_END;
1905
1906         return temp_limit_table;
1907
1908 }
1909
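/*
 * of_get_dvfs_table() - parse the node's "operating-points" property
 * (<frequency volt> pairs, kHz and uV per the usual operating-points
 * convention) into a CPUFREQ_TABLE_END-terminated table.
 *
 * Purely illustrative fragment (made-up values, not from any board file):
 *
 *     operating-points = <
 *             // kHz    uV
 *             312000    950000
 *             600000   1000000
 *     >;
 */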
1910 static int of_get_dvfs_table(struct device_node *dev_node,
1911                              struct cpufreq_frequency_table **dvfs_table)
1912 {
1913         struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1914         const struct property *prop;
1915         const __be32 *val;
1916         int nr, i;
1917
1918         prop = of_find_property(dev_node, "operating-points", NULL);
1919         if (!prop)
1920                 return -EINVAL;
1921         if (!prop->value)
1922                 return -EINVAL;
1923
1924         nr = prop->length / sizeof(u32);
1925         if (nr % 2) {
1926                 pr_err("%s: Invalid freq list\n", __func__);
1927                 return -EINVAL;
1928         }
1929
1930         tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1931                              (nr/2 + 1), GFP_KERNEL);
             if (!tmp_dvfs_table)
                     return -ENOMEM;

1932         val = prop->value;
1933
1934         for (i = 0; i < nr/2; i++) {
1935                 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
1936                 tmp_dvfs_table[i].index = be32_to_cpup(val++);
1937         }
1938
1939         tmp_dvfs_table[i].index = 0;
1940         tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1941
1942         *dvfs_table = tmp_dvfs_table;
1943
1944         return 0;
1945 }
1946
1947
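/*
 * of_get_dvfs_pvtm_table() - parse "pvtm-operating-points", which carries
 * <frequency volt pvtm> triplets, into two parallel tables: the regular
 * frequency/voltage table and a frequency/pvtm table consumed by
 * pvtm_set_dvfs_table() when support_pvtm is set.
 */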
1948 static int of_get_dvfs_pvtm_table(struct device_node *dev_node,
1949                                   struct cpufreq_frequency_table **dvfs_table,
1950                                   struct cpufreq_frequency_table **pvtm_table)
1951 {
1952         struct cpufreq_frequency_table *tmp_dvfs_table = NULL;
1953         struct cpufreq_frequency_table *tmp_pvtm_table = NULL;
1954         const struct property *prop;
1955         const __be32 *val;
1956         int nr, i;
1957
1958         prop = of_find_property(dev_node, "pvtm-operating-points", NULL);
1959         if (!prop)
1960                 return -EINVAL;
1961         if (!prop->value)
1962                 return -EINVAL;
1963
1964         nr = prop->length / sizeof(u32);
1965         if (nr % 3) {
1966                 pr_err("%s: Invalid freq list\n", __func__);
1967                 return -EINVAL;
1968         }
1969
1970         tmp_dvfs_table = kzalloc(sizeof(*tmp_dvfs_table) *
1971                              (nr/3 + 1), GFP_KERNEL);
1972
1973         tmp_pvtm_table = kzalloc(sizeof(*tmp_pvtm_table) *
1974                              (nr/3 + 1), GFP_KERNEL);
             if (!tmp_dvfs_table || !tmp_pvtm_table) {
                     kfree(tmp_dvfs_table);
                     kfree(tmp_pvtm_table);
                     return -ENOMEM;
             }
1975
1976         val = prop->value;
1977
1978         for (i = 0; i < nr/3; i++) {
1979                 tmp_dvfs_table[i].frequency = be32_to_cpup(val++);
1980                 tmp_dvfs_table[i].index = be32_to_cpup(val++);
1981
1982                 tmp_pvtm_table[i].frequency = tmp_dvfs_table[i].frequency;
1983                 tmp_pvtm_table[i].index = be32_to_cpup(val++);
1984         }
1985
1986         tmp_dvfs_table[i].index = 0;
1987         tmp_dvfs_table[i].frequency = CPUFREQ_TABLE_END;
1988
1989         tmp_pvtm_table[i].index = 0;
1990         tmp_pvtm_table[i].frequency = CPUFREQ_TABLE_END;
1991
1992         *dvfs_table = tmp_dvfs_table;
1993         *pvtm_table = tmp_pvtm_table;
1994
1995         return 0;
1996 }
1997
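/*
 * of_get_lkg_adjust_volt_table() - parse <leakage delta_volt> pairs from the
 * DT into a table terminated by a dlt_volt of CPUFREQ_TABLE_END, as consumed
 * by get_adjust_volt_by_leakage().
 */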
1998 static struct lkg_adjust_volt_table
1999         *of_get_lkg_adjust_volt_table(struct device_node *np,
2000         const char *propname)
2001 {
2002         struct lkg_adjust_volt_table *lkg_adjust_volt_table = NULL;
2003         const struct property *prop;
2004         const __be32 *val;
2005         int nr, i;
2006
2007         prop = of_find_property(np, propname, NULL);
2008         if (!prop)
2009                 return NULL;
2010         if (!prop->value)
2011                 return NULL;
2012
2013         nr = prop->length / sizeof(s32);
2014         if (nr % 2) {
2015                 pr_err("%s: Invalid lkg_adjust_volt table\n", __func__);
2016                 return NULL;
2017         }
2018
2019         lkg_adjust_volt_table =
2020                 kzalloc(sizeof(struct lkg_adjust_volt_table) *
2021                 (nr/2 + 1), GFP_KERNEL);
             if (!lkg_adjust_volt_table)
                     return NULL;
2022
2023         val = prop->value;
2024
2025         for (i = 0; i < nr/2; i++) {
2026                 lkg_adjust_volt_table[i].lkg = be32_to_cpup(val++);
2027                 lkg_adjust_volt_table[i].dlt_volt = be32_to_cpup(val++);
2028         }
2029
2030         lkg_adjust_volt_table[i].lkg = 0;
2031         lkg_adjust_volt_table[i].dlt_volt = CPUFREQ_TABLE_END;
2032
2033         return lkg_adjust_volt_table;
2034 }
2035
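/*
 * dvfs_node_parse_dt() - fill a dvfs_node from its DT node: leakage channel,
 * optional regulator-mode table, temperature-limit tables, the dvfs (or
 * dvfs + pvtm) operating points, and the optional leakage voltage-adjust
 * parameters.
 */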
2036 static int dvfs_node_parse_dt(struct device_node *np,
2037                               struct dvfs_node *dvfs_node)
2038 {
2039         int process_version = rockchip_process_version();
2040         int i = 0;
2041         int ret;
2042
2043         of_property_read_u32_index(np, "channel", 0, &dvfs_node->channel);
2044
2045         pr_info("channel:%d, lkg:%d\n",
2046                 dvfs_node->channel, rockchip_get_leakage(dvfs_node->channel));
2047
2048         of_property_read_u32_index(np, "regu-mode-en", 0,
2049                                    &dvfs_node->regu_mode_en);
2050         if (dvfs_node->regu_mode_en)
2051                 dvfs_node->regu_mode_table = of_get_regu_mode_table(np);
2052         else
2053                 dvfs_node->regu_mode_table = NULL;
2054
2055         of_property_read_u32_index(np, "temp-limit-enable", 0,
2056                                    &dvfs_node->temp_limit_enable);
2057         if (dvfs_node->temp_limit_enable) {
2058                 of_property_read_u32_index(np, "min_temp_limit",
2059                                            0, &dvfs_node->min_temp_limit);
2060                 of_property_read_u32_index(np, "target-temp",
2061                                            0, &dvfs_node->target_temp);
2062                 pr_info("target-temp:%d\n", dvfs_node->target_temp);
2063                 dvfs_node->nor_temp_limit_table =
2064                         of_get_temp_limit_table(np,
2065                                                 "normal-temp-limit");
2066                 dvfs_node->per_temp_limit_table =
2067                         of_get_temp_limit_table(np,
2068                                                 "performance-temp-limit");
2069                 dvfs_node->virt_temp_limit_table[0] =
2070                         of_get_temp_limit_table(np,
2071                                                 "virt-temp-limit-1-cpu-busy");
2072                 dvfs_node->virt_temp_limit_table[1] =
2073                         of_get_temp_limit_table(np,
2074                                                 "virt-temp-limit-2-cpu-busy");
2075                 dvfs_node->virt_temp_limit_table[2] =
2076                         of_get_temp_limit_table(np,
2077                                                 "virt-temp-limit-3-cpu-busy");
2078                 dvfs_node->virt_temp_limit_table[3] =
2079                         of_get_temp_limit_table(np,
2080                                                 "virt-temp-limit-4-cpu-busy");
2081         }
2082         dvfs_node->temp_limit_rate = -1;
2083
2084         ret = of_property_read_u32_index(np, "support-pvtm", 0,
2085                                          &dvfs_node->support_pvtm);
2086         if (!ret) {
2087                 if (of_get_dvfs_pvtm_table(np, &dvfs_node->dvfs_table,
2088                                            &dvfs_node->pvtm_table))
2089                         return -EINVAL;
2090
2091                 for (i = 0; i < ARRAY_SIZE(pvtm_info_table); i++) {
2092                         struct pvtm_info *pvtm_info = pvtm_info_table[i];
2093
2094                         if ((pvtm_info->channel == dvfs_node->channel) &&
2095                             (pvtm_info->process_version == process_version) &&
2096                              of_machine_is_compatible(pvtm_info->compatible)) {
2097                                 dvfs_node->pvtm_info = pvtm_info;
2098                                 break;
2099                         }
2100                 }
2101
2102                 if (!dvfs_node->pvtm_info)
2103                         dvfs_node->support_pvtm = 0;
2104         } else {
2105                 if (of_get_dvfs_table(np, &dvfs_node->dvfs_table))
2106                         return -EINVAL;
2107         }
2108
2109         of_property_read_u32_index(np, "lkg_adjust_volt_en", 0,
2110                                    &dvfs_node->lkg_adjust_volt_en);
2111         if (dvfs_node->lkg_adjust_volt_en) {
2112                 dvfs_node->lkg_info.def_table_lkg = -1;
2113                 of_property_read_u32_index(np, "def_table_lkg", 0,
2114                                            &dvfs_node->lkg_info.def_table_lkg);
2115
2116                 dvfs_node->lkg_info.min_adjust_freq = -1;
2117                 of_property_read_u32_index(np, "min_adjust_freq", 0,
2118                                            &dvfs_node->lkg_info.min_adjust_freq
2119                                            );
2120
2121                 dvfs_node->lkg_info.table =
2122                         of_get_lkg_adjust_volt_table(np,
2123                                                      "lkg_adjust_volt_table");
2124         }
2125
2126         return 0;
2127 }
2128
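/*
 * of_dvfs_init() - build the DVFS tree from the "dvfs" DT node: one vd_node
 * per voltage-domain child, one pd_node per power-domain grandchild, and one
 * dvfs_node per clock leaf, bound via clk_get() to the clock of the same
 * name.  Every voltage domain gets dvfs_target() as its vd_dvfs_target
 * callback.
 */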
2129 int of_dvfs_init(void)
2130 {
2131         struct vd_node *vd;
2132         struct pd_node *pd;
2133         struct device_node *dvfs_dev_node, *clk_dev_node, *vd_dev_node, *pd_dev_node;
2134         struct dvfs_node *dvfs_node;
2135         struct clk *clk;
2136         int ret;
2137
2138         DVFS_DBG("%s\n", __func__);
2139         pr_info("process version: %d\n", rockchip_process_version());
2140
2141         dvfs_dev_node = of_find_node_by_name(NULL, "dvfs");
2142         if (!dvfs_dev_node) {
2143                 DVFS_ERR("%s get dvfs dev node err\n", __func__);
2144                 return -ENODEV;
2145         }
2146
2147         for_each_available_child_of_node(dvfs_dev_node, vd_dev_node) {
2148                 vd = kzalloc(sizeof(struct vd_node), GFP_KERNEL);
2149                 if (!vd)
2150                         return -ENOMEM;
2151
2152                 mutex_init(&vd->mutex);
2153                 vd->name = vd_dev_node->name;
2154                 ret = of_property_read_string(vd_dev_node, "regulator_name", &vd->regulator_name);
2155                 if (ret) {
2156                         DVFS_ERR("%s:vd(%s) get regulator_name err, ret:%d\n", 
2157                                 __func__, vd_dev_node->name, ret);
2158                         kfree(vd);
2159                         continue;
2160                 }
2161                 
2162                 vd->suspend_volt = 0;
2163                 
2164                 vd->volt_set_flag = DVFS_SET_VOLT_FAILURE;
2165                 vd->vd_dvfs_target = dvfs_target;
2166                 ret = rk_regist_vd(vd);
2167                 if (ret){
2168                         DVFS_ERR("%s:vd(%s) register err:%d\n", __func__, vd->name, ret);
2169                         kfree(vd);
2170                         continue;
2171                 }
2172
2173                 DVFS_DBG("%s:vd(%s) register ok, regulator name:%s,suspend volt:%d\n", 
2174                         __func__, vd->name, vd->regulator_name, vd->suspend_volt);
2175                 
2176                 for_each_available_child_of_node(vd_dev_node, pd_dev_node) {            
2177                         pd = kzalloc(sizeof(struct pd_node), GFP_KERNEL);
2178                         if (!pd)
2179                                 return -ENOMEM;
2180
2181                         pd->vd = vd;
2182                         pd->name = pd_dev_node->name;
2183                         
2184                         ret = rk_regist_pd(pd);
2185                         if (ret){
2186                                 DVFS_ERR("%s:pd(%s) register err:%d\n", __func__, pd->name, ret);
2187                                 kfree(pd);
2188                                 continue;
2189                         }
2190                         DVFS_DBG("%s:pd(%s) register ok, parent vd:%s\n", 
2191                                 __func__, pd->name, vd->name);                  
2192                         for_each_available_child_of_node(pd_dev_node, clk_dev_node) {
2193                                 if (!of_device_is_available(clk_dev_node))
2194                                         continue;
2195                                 
2196                                 dvfs_node = kzalloc(sizeof(struct dvfs_node), GFP_KERNEL);
2197                                 if (!dvfs_node)
2198                                         return -ENOMEM;
2199                                 
2200                                 dvfs_node->name = clk_dev_node->name;
2201                                 dvfs_node->pd = pd;
2202                                 dvfs_node->vd = vd;
2203
2204                                 if (dvfs_node_parse_dt(clk_dev_node, dvfs_node)) {
                                             kfree(dvfs_node);
2205                                         continue;
                                     }
2206                                 
2207                                 clk = clk_get(NULL, clk_dev_node->name);
2208                                 if (IS_ERR(clk)){
2209                                         DVFS_ERR("%s:get clk(%s) err:%ld\n", __func__, dvfs_node->name, PTR_ERR(clk));
2210                                         kfree(dvfs_node);
2211                                         continue;
2212                                         
2213                                 }
2214                                 
2215                                 dvfs_node->clk = clk;
2216                                 ret = rk_regist_clk(dvfs_node);
2217                                 if (ret){
2218                                         DVFS_ERR("%s:dvfs_node(%s) register err:%d\n", __func__, dvfs_node->name, ret);
2219                                         return ret;
2220                                 }
2221
2222                                 DVFS_DBG("%s:dvfs_node(%s) register ok, parent pd:%s\n", 
2223                                         __func__, clk_dev_node->name, pd->name);        
2224
2225                         }
2226                 }       
2227         }
2228         return 0;
2229 }
2230
2231 #ifdef CONFIG_ARM64
2232 arch_initcall_sync(of_dvfs_init);
2233 #endif
2234
2235 /*********************************************************************************/
2236 /**
2237  * dump_dbg_map() - dump the whole DVFS tree to the kernel log for debugging
2238  */
2239 static int dump_dbg_map(char *buf)
2240 {
2241         int i;
2242         struct vd_node  *vd;
2243         struct pd_node  *pd;
2244         struct dvfs_node        *clk_dvfs_node;
2245         char *s = buf;
2246         
2247         mutex_lock(&rk_dvfs_mutex);
2248         printk( "-------------DVFS TREE-----------\n\n\n");
2249         printk( "DVFS TREE:\n");
2250
2251         list_for_each_entry(vd, &rk_dvfs_tree, node) {
2252                 mutex_lock(&vd->mutex);
2253                 printk( "|\n|- voltage domain:%s\n", vd->name);
2254                 printk( "|- current voltage:%d\n", vd->cur_volt);
2255                 printk( "|- current regu_mode:%s\n", dvfs_regu_mode_to_string(vd->regu_mode));
2256
2257                 list_for_each_entry(pd, &vd->pd_list, node) {
2258                         printk( "|  |\n|  |- power domain:%s, status = %s, current volt = %d, current regu_mode = %s\n",
2259                                         pd->name, (pd->pd_status == 1) ? "ON" : "OFF", pd->cur_volt,
2260                                         dvfs_regu_mode_to_string(pd->regu_mode));
2261
2262                         list_for_each_entry(clk_dvfs_node, &pd->clk_list, node) {
2263                                 printk( "|  |  |\n|  |  |- clock: %s current: rate %d, volt = %d,"
2264                                                 " enable_dvfs = %s\n",
2265                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq, clk_dvfs_node->set_volt,
2266                                                 clk_dvfs_node->enable_count == 0 ? "DISABLE" : "ENABLE");
2267                                 printk( "|  |  |- clk limit(%s):[%u, %u]; last set rate = %lu\n",
2268                                                 clk_dvfs_node->freq_limit_en ? "enable" : "disable",
2269                                                 clk_dvfs_node->min_rate, clk_dvfs_node->max_rate,
2270                                                 clk_dvfs_node->last_set_rate/1000);
2271                                 for (i = 0; (clk_dvfs_node->dvfs_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2272                                         printk( "|  |  |  |- freq = %d, volt = %d\n",
2273                                                         clk_dvfs_node->dvfs_table[i].frequency,
2274                                                         clk_dvfs_node->dvfs_table[i].index);
2275
2276                                 }
2277                                 printk( "|  |  |- clock: %s current: rate %d, regu_mode = %s,"
2278                                                 " regu_mode_en = %d\n",
2279                                                 clk_dvfs_node->name, clk_dvfs_node->set_freq,
2280                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode),
2281                                                 clk_dvfs_node->regu_mode_en);
2282                                 if (clk_dvfs_node->regu_mode_table) {
2283                                         for (i = 0; (clk_dvfs_node->regu_mode_table[i].frequency != CPUFREQ_TABLE_END); i++) {
2284                                                 printk( "|  |  |  |- freq = %d, regu_mode = %s\n",
2285                                                                 clk_dvfs_node->regu_mode_table[i].frequency/1000,
2286                                                                 dvfs_regu_mode_to_string(clk_dvfs_node->regu_mode_table[i].index));
2287                                         }
2288                                 }
2289                         }
2290                 }
2291                 mutex_unlock(&vd->mutex);
2292         }
2293         
2294         printk( "-------------DVFS TREE END------------\n");
2295         mutex_unlock(&rk_dvfs_mutex);
2296         
2297         return s - buf;
2298 }
2299
2300 /*********************************************************************************/
2301 static struct kobject *dvfs_kobj;
2302 struct dvfs_attribute {
2303         struct attribute        attr;
2304         ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
2305                         char *buf);
2306         ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
2307                         const char *buf, size_t n);
2308 };
2309
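/*
 * sysfs interface: reading /sys/dvfs/dvfs_tree dumps the tree to the kernel
 * log via dump_dbg_map(); writes are accepted and ignored.
 */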
2310 static ssize_t dvfs_tree_store(struct kobject *kobj, struct kobj_attribute *attr,
2311                const char *buf, size_t n)
2312 {
2313        return n;
2314 }
2315 static ssize_t dvfs_tree_show(struct kobject *kobj, struct kobj_attribute *attr,
2316                char *buf)
2317 {
2318        return dump_dbg_map(buf);
2319 }
2320
2321
2322 static struct dvfs_attribute dvfs_attrs[] = {
2323         /*     node_name        permission              show_func       store_func */
2324 //#ifdef CONFIG_RK_CLOCK_PROC
2325         __ATTR(dvfs_tree,       S_IRUSR | S_IRGRP | S_IWUSR,    dvfs_tree_show, dvfs_tree_store),
2326 //#endif
2327 };
2328
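/*
 * dvfs_init() - late initcall: create the /sys/dvfs kobject and its
 * attributes, look up the cpu and gpu dvfs nodes, start the temperature-limit
 * work when either node has temp limiting enabled, and (when the vdd_gpu
 * regulator is available) register the GPU power-domain, framebuffer and
 * reboot notifiers that handle vdd_gpu.
 */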
2329 static int __init dvfs_init(void)
2330 {
2331         int i, ret = 0;
2332
2333         dvfs_kobj = kobject_create_and_add("dvfs", NULL);
2334         if (!dvfs_kobj)
2335                 return -ENOMEM;
2336         for (i = 0; i < ARRAY_SIZE(dvfs_attrs); i++) {
2337                 ret = sysfs_create_file(dvfs_kobj, &dvfs_attrs[i].attr);
2338                 if (ret != 0) {
2339                         DVFS_ERR("create index %d error\n", i);
2340                         return ret;
2341                 }
2342         }
2343
2344         clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
2345         if (!clk_cpu_dvfs_node)
2346                 return -EINVAL;
2347         clk_cpu_dvfs_node->temp_limit_rate = clk_cpu_dvfs_node->max_rate;
2348
2349         clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
2350         if (!clk_gpu_dvfs_node)
2351                 return -EINVAL;
2352         clk_gpu_dvfs_node->temp_limit_rate = clk_gpu_dvfs_node->max_rate;
2353
2354         if (clk_cpu_dvfs_node->temp_limit_enable ||
2355             clk_gpu_dvfs_node->temp_limit_enable) {
2356                 dvfs_wq = alloc_workqueue("dvfs", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
2357                 queue_delayed_work_on(0, dvfs_wq, &dvfs_temp_limit_work, 0*HZ);
2358         }
2359
2360         vdd_gpu_regulator = dvfs_get_regulator("vdd_gpu");
2361         if (!IS_ERR_OR_NULL(vdd_gpu_regulator)) {
2362                 struct clk *clk = clk_get(NULL, "pd_gpu");
2363
2364                 if (!IS_ERR_OR_NULL(clk))
2365                         rk_clk_pd_notifier_register(clk, &clk_pd_gpu_notifier);
2366
2367                 fb_register_client(&early_suspend_notifier);
2368                 register_reboot_notifier(&vdd_gpu_reboot_notifier);
2369         }
2370
2371         return ret;
2372 }
2373
2374 late_initcall(dvfs_init);