rk3288: change clk_vepu rate to 297M as default rate
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / rockchip-cpufreq.c
1 /*
2  * Copyright (C) 2013 ROCKCHIP, Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14 #define pr_fmt(fmt) "cpufreq: " fmt
15 #include <linux/clk.h>
16 #include <linux/cpufreq.h>
17 #include <linux/err.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/init.h>
20 #include <linux/reboot.h>
21 #include <linux/suspend.h>
22 #include <linux/tick.h>
23 #include <linux/workqueue.h>
24 #include <linux/delay.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/fs.h>
27 #include <linux/miscdevice.h>
28 #include <linux/string.h>
29 #include <linux/rockchip/cpu.h>
30 #include <linux/rockchip/dvfs.h>
31 #include <asm/smp_plat.h>
32 #include <asm/cpu.h>
33 #include <asm/unistd.h>
34 #include <asm/uaccess.h>
35
36 #define VERSION "1.0"
37
38 #ifdef DEBUG
39 #define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
40 #define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
41 #else
42 #define FREQ_DBG(fmt, args...) do {} while(0)
43 #define FREQ_LOG(fmt, args...) do {} while(0)
44 #endif
45 #define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
46
47 /* Frequency table index must be sequential starting at 0 */
/* Frequency table index must be sequential starting at 0 */
/*
 * Fallback OPP table used when the DVFS layer provides none.
 * .frequency is in kHz; .index is (ab)used to carry the OPP voltage in uV
 * (see the suspend_volt matching loop in cpufreq_init_cpu0()).
 */
static struct cpufreq_frequency_table default_freq_table[] = {
	{.frequency = 312 * 1000,       .index = 875 * 1000},
	{.frequency = 504 * 1000,       .index = 925 * 1000},
	{.frequency = 816 * 1000,       .index = 975 * 1000},
	{.frequency = 1008 * 1000,      .index = 1075 * 1000},
	{.frequency = 1200 * 1000,      .index = 1150 * 1000},
	{.frequency = 1416 * 1000,      .index = 1250 * 1000},
	{.frequency = 1608 * 1000,      .index = 1350 * 1000},
	{.frequency = CPUFREQ_TABLE_END},
};
/* Active table; replaced by dvfs_get_freq_volt_table() when one exists */
static struct cpufreq_frequency_table *freq_table = default_freq_table;
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
/* >0 while a PM/reboot notifier has frozen cpufreq via DISABLE_FURTHER_CPUFREQ */
static int no_cpufreq_access;
static unsigned int suspend_freq = 816 * 1000;	/* kHz; recomputed from OPP table */
static unsigned int suspend_volt = 1000000; // 1V
static unsigned int low_battery_freq = 600 * 1000;	/* kHz cap during low-battery boot */
static unsigned int low_battery_capacity = 5; // 5%
static bool is_booting = true;	/* cleared 60s after boot or on reboot */
static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
/* NOTE(review): file-global but not static — confirm no external users before narrowing */
struct dvfs_node *clk_cpu_dvfs_node = NULL;
struct dvfs_node *clk_gpu_dvfs_node = NULL;
struct dvfs_node *clk_vepu_dvfs_node = NULL;
/*******************************************************/
79 static unsigned int cpufreq_get_rate(unsigned int cpu)
80 {
81         if (clk_cpu_dvfs_node)
82                 return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;
83
84         return 0;
85 }
86
87 static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
88 {
89         char c = 0;
90         if (policy && policy->governor)
91                 c = policy->governor->name[0];
92         return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
93 }
94
95 static unsigned int get_freq_from_table(unsigned int max_freq)
96 {
97         unsigned int i;
98         unsigned int target_freq = 0;
99         for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
100                 unsigned int freq = freq_table[i].frequency;
101                 if (freq <= max_freq && target_freq < freq) {
102                         target_freq = freq;
103                 }
104         }
105         if (!target_freq)
106                 target_freq = max_freq;
107         return target_freq;
108 }
109
110 /**********************thermal limit**************************/
111 //#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
112
113 #ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
/* Current thermal cap in kHz; UINT_MAX (-1) means "no cap" */
static unsigned int temp_limit_freq = -1;
module_param(temp_limit_freq, uint, 0444);

/*
 * Thermal limit tables indexed by (number of busy CPUs - 1).
 * .frequency is the cap in kHz (-1 = unlimited); .index is (ab)used to
 * hold the temperature threshold in degrees Celsius.
 */
static struct cpufreq_frequency_table temp_limits[4][4] = {
	{	// 1 CPU busy
		{.frequency =          -1, .index = 50},
		{.frequency =          -1, .index = 55},
		{.frequency =          -1, .index = 60},
		{.frequency = 1608 * 1000, .index = 75},
	}, {	// 2 CPUs busy
		{.frequency = 1800 * 1000, .index = 50},
		{.frequency = 1608 * 1000, .index = 55},
		{.frequency = 1416 * 1000, .index = 60},
		{.frequency = 1200 * 1000, .index = 75},
	}, {	// 3 CPUs busy
		{.frequency = 1608 * 1000, .index = 50},
		{.frequency = 1416 * 1000, .index = 55},
		{.frequency = 1200 * 1000, .index = 60},
		{.frequency = 1008 * 1000, .index = 75},
	}, {	// 4 CPUs busy
		{.frequency = 1416 * 1000, .index = 50},
		{.frequency = 1200 * 1000, .index = 55},
		{.frequency = 1008 * 1000, .index = 60},
		{.frequency =  816 * 1000, .index = 75},
	}
};

/* Cap used in CPU-bound "performance" mode; threshold 100°C so it always matches */
static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
	{.frequency = 1008 * 1000, .index = 100},
};

/* Cap used in GPU-bound "performance" mode; threshold 0 => unconditionally applied */
static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
	{.frequency = 1008 * 1000, .index = 0},
};
148
/* Stub thermal read-out: no sensor is wired up, always report 60°C. */
static int get_temp(void)
{
	static const int fake_temp_celsius = 60;

	return fake_temp_celsius;
}
153
/* Last state byte userspace wrote to /dev/sys_state ('1' => performance mode) */
static char sys_state;
/*
 * write() handler for the sys_state misc device: records only the first
 * byte of the buffer; the rest is accepted but ignored.
 * Returns @count on success, -EFAULT on a bad user pointer.
 */
static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	char state;

	if (count < 1)
		return count;
	if (copy_from_user(&state, buffer, 1)) {
		return -EFAULT;
	}

	sys_state = state;
	return count;
}
168
/* Write-only interface for userspace to flag performance mode */
static const struct file_operations sys_state_fops = {
	.owner	= THIS_MODULE,
	.write	= sys_state_write,
};

/* Registered as /dev/sys_state with a dynamically assigned minor */
static struct miscdevice sys_state_dev = {
	.fops	= &sys_state_fops,
	.name	= "sys_state",
	.minor	= MISC_DYNAMIC_MINOR,
};
179
180 static void cpufreq_temp_limit_work_func(struct work_struct *work)
181 {
182         static bool in_perf = false;
183         struct cpufreq_policy *policy;
184         int temp, i;
185         unsigned int new_freq = -1;
186         unsigned long delay = HZ / 10; // 100ms
187         unsigned int nr_cpus = num_online_cpus();
188         const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
189         size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);
190
191         temp = get_temp();
192
193         if (sys_state == '1') {
194                 in_perf = true;
195                 if (gpu_is_mali400) {
196                         unsigned int gpu_irqs[2];
197                         gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
198                         msleep(40);
199                         gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
200                         delay = 0;
201                         if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
202                                 limits_table = temp_limits_cpu_perf;
203                                 limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
204                         } else {
205                                 limits_table = temp_limits_gpu_perf;
206                                 limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
207                         }
208                 } else {
209                         delay = HZ; // 1s
210                         limits_table = temp_limits_cpu_perf;
211                         limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
212                 }
213         } else if (in_perf) {
214                 in_perf = false;
215         } else {
216                 static u64 last_time_in_idle = 0;
217                 static u64 last_time_in_idle_timestamp = 0;
218                 u64 time_in_idle = 0, now;
219                 u32 delta_idle;
220                 u32 delta_time;
221                 unsigned cpu;
222
223                 for_each_online_cpu(cpu) {
224                         time_in_idle += get_cpu_idle_time_us(cpu, &now);
225                 }
226                 delta_time = now - last_time_in_idle_timestamp;
227                 delta_idle = time_in_idle - last_time_in_idle;
228                 last_time_in_idle = time_in_idle;
229                 last_time_in_idle_timestamp = now;
230                 delta_idle += delta_time >> 4; // +6.25%
231                 if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
232                         limits_table = temp_limits[0];
233                 else if (delta_idle > (nr_cpus - 2) * delta_time)
234                         limits_table = temp_limits[1];
235                 else if (delta_idle > (nr_cpus - 3) * delta_time)
236                         limits_table = temp_limits[2];
237                 FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0]));
238         }
239
240         for (i = 0; i < limits_size; i++) {
241                 if (temp >= limits_table[i].index) {
242                         new_freq = limits_table[i].frequency;
243                 }
244         }
245
246         if (temp_limit_freq != new_freq) {
247                 unsigned int cur_freq;
248                 temp_limit_freq = new_freq;
249                 cur_freq = cpufreq_get_rate(0);
250                 FREQ_DBG("temp limit %7d KHz cur %7d KHz\n", temp_limit_freq, cur_freq);
251                 if (cur_freq > temp_limit_freq) {
252                         policy = cpufreq_cpu_get(0);
253                         cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
254                         cpufreq_cpu_put(policy);
255                 }
256         }
257
258         queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);
259 }
260
261 static DECLARE_DELAYED_WORK(cpufreq_temp_limit_work, cpufreq_temp_limit_work_func);
262
263 static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
264 {
265         struct cpufreq_policy *policy = data;
266
267         if (val != CPUFREQ_NOTIFY)
268                 return 0;
269
270         if (cpufreq_is_ondemand(policy)) {
271                 FREQ_DBG("queue work\n");
272                 queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
273         } else {
274                 FREQ_DBG("cancel work\n");
275                 cancel_delayed_work_sync(&cpufreq_temp_limit_work);
276         }
277
278         return 0;
279 }
280
/* Re-evaluates the thermal worker whenever the governor/policy changes */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy
};
284
/*
 * Clamp every entry of the thermal tables to an actually supported OPP,
 * register the /dev/sys_state misc device and, if a dynamic governor is
 * active, kick off the periodic thermal worker.
 */
static void cpufreq_temp_limit_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	struct cpufreq_frequency_table *table;

	/* walks the whole contiguous 4x4 temp_limits array via its first row */
	table = temp_limits[0];
	for (i = 0; i < sizeof(temp_limits) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	}
	table = temp_limits_cpu_perf;
	for (i = 0; i < sizeof(temp_limits_cpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	}
	table = temp_limits_gpu_perf;
	for (i = 0; i < sizeof(temp_limits_gpu_perf) / sizeof(struct cpufreq_frequency_table); i++) {
		table[i].frequency = get_freq_from_table(table[i].frequency);
	}
	/* NOTE(review): misc_register() return value is ignored — consider checking */
	misc_register(&sys_state_dev);
	if (cpufreq_is_ondemand(policy)) {
		queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0*HZ);
	}
	cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
}
308
309 static void cpufreq_temp_limit_exit(void)
310 {
311         cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
312         if (freq_wq)
313                 cancel_delayed_work(&cpufreq_temp_limit_work);
314 }
315 #else
316 static inline void cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
317 static inline void cpufreq_temp_limit_exit(void) {}
318 #endif
319
320 static int cpufreq_verify(struct cpufreq_policy *policy)
321 {
322         if (!freq_table)
323                 return -EINVAL;
324         return cpufreq_frequency_table_verify(policy, freq_table);
325 }
326
327 static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
328 {
329         unsigned int i;
330         int ret;
331         struct cpufreq_freqs freqs;
332         struct cpufreq_policy *policy;
333         
334         freqs.new = rate / 1000;
335         freqs.old = clk_get_rate(clk) / 1000;
336         
337         for_each_online_cpu(freqs.cpu) {
338                 policy = cpufreq_cpu_get(freqs.cpu);
339                 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
340                 cpufreq_cpu_put(policy);
341         }
342         
343         FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
344         
345         ret = clk_set_rate(clk, rate);
346
347 #ifdef CONFIG_SMP
348         /*
349          * Note that loops_per_jiffy is not updated on SMP systems in
350          * cpufreq driver. So, update the per-CPU loops_per_jiffy value
351          * on frequency transition. We need to update all dependent CPUs.
352          */
353         for_each_possible_cpu(i) {
354                 per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
355         }
356 #endif
357
358         freqs.new = clk_get_rate(clk) / 1000;
359         /* notifiers */
360         for_each_online_cpu(freqs.cpu) {
361                 policy = cpufreq_cpu_get(freqs.cpu);
362                 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
363                 cpufreq_cpu_put(policy);
364         }
365
366         return ret;
367         
368 }
369
370 static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
371 {
372         unsigned int i;
373         gpu_is_mali400 = cpu_is_rk3188();
374
375         clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
376         if (clk_gpu_dvfs_node){
377                 clk_enable_dvfs(clk_gpu_dvfs_node);
378                 if (gpu_is_mali400)
379                         dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000); 
380         }
381
382         clk_vepu_dvfs_node = clk_get_dvfs_node("clk_vepu");
383         if (clk_vepu_dvfs_node){
384                 clk_enable_dvfs(clk_vepu_dvfs_node);
385                 dvfs_clk_set_rate(clk_vepu_dvfs_node, 198000000);
386                 dvfs_clk_set_rate(clk_vepu_dvfs_node, 297000000);
387         }
388
389         clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
390         if (!clk_cpu_dvfs_node){
391                 return -EINVAL;
392         }
393         dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
394         freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
395         if (freq_table == NULL) {
396                 freq_table = default_freq_table;
397         } else {
398                 int v = INT_MAX;
399                 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
400                         if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
401                                 suspend_freq = freq_table[i].frequency;
402                                 v = freq_table[i].index;
403                         }
404                 }
405         }
406         low_battery_freq = get_freq_from_table(low_battery_freq);
407         clk_enable_dvfs(clk_cpu_dvfs_node);
408
409         freq_wq = alloc_workqueue("cpufreq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
410         cpufreq_temp_limit_init(policy);
411
412         printk("cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
413         return 0;
414 }
415
/*
 * cpufreq ->init hook.  The heavy one-time setup runs only for CPU0; its
 * result is cached in cpu0_err so secondary CPUs fail consistently.
 */
static int cpufreq_init(struct cpufreq_policy *policy)
{
	static int cpu0_err;

	if (policy->cpu == 0) {
		cpu0_err = cpufreq_init_cpu0(policy);
	}

	if (cpu0_err)
		return cpu0_err;

	//set freq min max
	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	//sys nod
	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);


	policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

	policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;	// make ondemand default sampling_rate to 40000

	/*
	 * On SMP configuration, both processors share the voltage
	 * and clock. So both CPUs needs to be scaled together and hence
	 * needs software co-ordination. Use cpufreq affected_cpus
	 * interface to handle this scenario. Additional is_smp() check
	 * is to keep SMP_ON_UP build working.
	 */
	if (is_smp())
		cpumask_setall(policy->cpus);

	return 0;

}
450
/* cpufreq ->exit hook; only CPU0 tears down the shared state. */
static int cpufreq_exit(struct cpufreq_policy *policy)
{
	if (policy->cpu != 0)
		return 0;

	/*
	 * NOTE(review): calling cpufreq_frequency_table_cpuinfo() on exit
	 * looks odd — comparable drivers call cpufreq_frequency_table_put_attr()
	 * here to drop the sysfs attribute; verify against this tree's API.
	 */
	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	clk_put_dvfs_node(clk_cpu_dvfs_node);
	cpufreq_temp_limit_exit();
	if (freq_wq) {
		flush_workqueue(freq_wq);
		destroy_workqueue(freq_wq);
		freq_wq = NULL;
	}

	return 0;
}
467
/* sysfs attributes exported per policy: scaling_available_frequencies */
static struct freq_attr *cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
472
//#ifdef CONFIG_POWER_SUPPLY
#if 0
extern int rk_get_system_battery_capacity(void);
#else
/* Battery reading disabled: pretend the battery is always full (100%) */
static int rk_get_system_battery_capacity(void) { return 100; }
#endif
479
/*
 * Apply the boot-time low-battery cap and (when compiled in) the thermal
 * cap to @target_freq.  Only active for dynamic governors; others get
 * their request back untouched.  @is_private marks internally generated
 * (thermal) requests.
 */
static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
{
	bool is_ondemand = cpufreq_is_ondemand(policy);

	if (!is_ondemand)
		return target_freq;

	/* during the first 60s after boot, cap the frequency on low battery */
	if (is_booting) {
		s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
		if (boottime_ms > 60 * MSEC_PER_SEC) {
			is_booting = false;
		} else if (target_freq > low_battery_freq &&
			   rk_get_system_battery_capacity() <= low_battery_capacity) {
			target_freq = low_battery_freq;
		}
	}

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
	{
		/* remember the governor's own request; private (thermal) requests replay it */
		static unsigned int ondemand_target = 816 * 1000;
		if (is_private)
			target_freq = ondemand_target;
		else
			ondemand_target = target_freq;
	}

	/*
	 * If the new frequency is more than the thermal max allowed
	 * frequency, go ahead and scale the mpu device to proper frequency.
	 */
	target_freq = min(target_freq, temp_limit_freq);
#endif

	return target_freq;
}
515
/*
 * cpufreq ->target hook.  Handles the private DISABLE/ENABLE_FURTHER_CPUFREQ
 * protocol (used by the PM and reboot notifiers to pin the frequency),
 * applies the scale limits, then programs the new rate through DVFS.
 */
static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
	unsigned int i, new_freq = target_freq, new_rate, cur_rate;
	int ret = 0;
	bool is_private;

	if (!freq_table) {
		FREQ_ERR("no freq table!\n");
		return -EINVAL;
	}

	mutex_lock(&cpufreq_mutex);

	is_private = relation & CPUFREQ_PRIVATE;
	relation &= ~CPUFREQ_PRIVATE;

	/* ENABLE is processed before the access check so a prior disable can be lifted */
	if (relation & ENABLE_FURTHER_CPUFREQ)
		no_cpufreq_access--;
	if (no_cpufreq_access) {
		FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
		ret = -EINVAL;
		goto out;
	}
	if (relation & DISABLE_FURTHER_CPUFREQ)
		no_cpufreq_access++;
	relation &= ~MASK_FURTHER_CPUFREQ;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
	if (ret) {
		FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
		goto out;
	}
	new_freq = freq_table[i].frequency;
	/* limits are skipped while pinned, so the pinning request itself wins */
	if (!no_cpufreq_access)
		new_freq = cpufreq_scale_limit(new_freq, policy, is_private);

	new_rate = new_freq * 1000;
	cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
	FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
	if (new_rate == cur_rate)
		goto out;
	ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);

out:
	FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
	mutex_unlock(&cpufreq_mutex);
	return ret;

}
565
/*
 * PM notifier: before suspend, force the CPU to suspend_freq and block
 * further cpufreq requests; after resume/restore, lift the block.  Only
 * active when a dynamic governor is in use.
 */
static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);

	if (!policy)
		return ret;

	if (!cpufreq_is_ondemand(policy))
		goto out;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
		if (ret < 0) {
			ret = NOTIFY_BAD;
			goto out;
		}
		ret = NOTIFY_OK;
		break;
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		/* the call exists to deliver ENABLE_FURTHER_CPUFREQ; the freq is incidental */
		cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
		ret = NOTIFY_OK;
		break;
	}
out:
	cpufreq_cpu_put(policy);
	return ret;
}
596
/* Pins the frequency across system suspend/resume */
static struct notifier_block cpufreq_pm_notifier = {
	.notifier_call = cpufreq_pm_notifier_event,
};
600
601 static int cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
602 {
603         struct cpufreq_policy *policy = cpufreq_cpu_get(0);
604
605         if (policy) {
606                 is_booting = false;
607                 cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
608                 cpufreq_cpu_put(policy);
609         }
610
611         return NOTIFY_OK;
612 }
613
/* Pins the frequency for the duration of reboot/shutdown */
static struct notifier_block cpufreq_reboot_notifier = {
	.notifier_call = cpufreq_reboot_notifier_event,
};
617
/*
 * Driver ops.  CPUFREQ_CONST_LOOPS: loops_per_jiffy is maintained manually
 * in cpufreq_scale_rate_for_dvfs(), so the core must not rescale it.
 */
static struct cpufreq_driver cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cpufreq_verify,
	.target = cpufreq_target,
	.get = cpufreq_get_rate,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.name = "rockchip",
	.attr = cpufreq_attr,
};
628
629 static int __init cpufreq_driver_init(void)
630 {
631         register_pm_notifier(&cpufreq_pm_notifier);
632         register_reboot_notifier(&cpufreq_reboot_notifier);
633         return cpufreq_register_driver(&cpufreq_driver);
634 }
635
636 device_initcall(cpufreq_driver_init);