rk3188: add status to dvfs dts node, rename rk3188-cpufreq.c to rockchip-cpufreq.c
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/rockchip-cpufreq.c
/*
 * Copyright (C) 2013 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>

#define VERSION "1.0"

#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)

/*
 * Default operating points. The .index field is reused to hold the target
 * voltage in uV for each frequency (in kHz); it is not a sequential index.
 */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 312 * 1000,       .index = 875 * 1000},
        {.frequency = 504 * 1000,       .index = 925 * 1000},
        {.frequency = 816 * 1000,       .index = 975 * 1000},
        {.frequency = 1008 * 1000,      .index = 1075 * 1000},
        {.frequency = 1200 * 1000,      .index = 1150 * 1000},
        {.frequency = 1416 * 1000,      .index = 1250 * 1000},
        {.frequency = 1608 * 1000,      .index = 1350 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};
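/*
 * Operating table in use; cpufreq_init_cpu0() replaces the default with the
 * table supplied by the dvfs layer (from the DT) when one is available.
 */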
static struct cpufreq_frequency_table *freq_table = default_freq_table;
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
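/*
 * Example (as used by the PM and reboot notifiers below): callers OR these
 * flags into the "relation" argument of cpufreq_driver_target(), e.g.
 *
 *   cpufreq_driver_target(policy, suspend_freq,
 *                         DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
 *
 * pins the frequency and blocks further scaling until a matching
 * ENABLE_FURTHER_CPUFREQ request is issued.
 */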
static int no_cpufreq_access;
static unsigned int suspend_freq = 816 * 1000;
static unsigned int suspend_volt = 1000000; /* 1V */
static unsigned int low_battery_freq = 600 * 1000;
static unsigned int low_battery_capacity = 5; /* 5% */
static bool is_booting = true;
static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
struct dvfs_node *clk_cpu_dvfs_node = NULL;
struct dvfs_node *clk_gpu_dvfs_node = NULL;
/*******************************************************/
static unsigned int cpufreq_get_rate(unsigned int cpu)
{
        if (clk_cpu_dvfs_node)
                return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

        return 0;
}

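/*
 * Heuristic: match on the first letter of the governor name, covering the
 * load-based governors this driver cares about -- presumably "ondemand",
 * "interactive", "conservative" and "hotplug".
 */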
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
{
        char c = 0;

        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
}

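/*
 * Return the highest table frequency that does not exceed max_freq, or
 * max_freq itself if every table entry is above it.
 */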
static unsigned int get_freq_from_table(unsigned int max_freq)
{
        unsigned int i;
        unsigned int target_freq = 0;

        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = freq_table[i].frequency;

                if (freq <= max_freq && target_freq < freq)
                        target_freq = freq;
        }
        if (!target_freq)
                target_freq = max_freq;
        return target_freq;
}

/**********************thermal limit**************************/
//#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static unsigned int temp_limit_freq = -1;
module_param(temp_limit_freq, uint, 0444);

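/*
 * temp_limits[n-1] is used when roughly n CPUs are busy. Each entry reuses
 * .index as a temperature threshold in degrees C and .frequency as the
 * frequency ceiling in kHz applied at that temperature (-1 == no limit).
 */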
static struct cpufreq_frequency_table temp_limits[4][4] = {
        {       /* 1 CPU busy */
                {.frequency =          -1, .index = 50},
                {.frequency =          -1, .index = 55},
                {.frequency =          -1, .index = 60},
                {.frequency = 1608 * 1000, .index = 75},
        }, {    /* 2 CPUs busy */
                {.frequency = 1800 * 1000, .index = 50},
                {.frequency = 1608 * 1000, .index = 55},
                {.frequency = 1416 * 1000, .index = 60},
                {.frequency = 1200 * 1000, .index = 75},
        }, {    /* 3 CPUs busy */
                {.frequency = 1608 * 1000, .index = 50},
                {.frequency = 1416 * 1000, .index = 55},
                {.frequency = 1200 * 1000, .index = 60},
                {.frequency = 1008 * 1000, .index = 75},
        }, {    /* 4 CPUs busy */
                {.frequency = 1416 * 1000, .index = 50},
                {.frequency = 1200 * 1000, .index = 55},
                {.frequency = 1008 * 1000, .index = 60},
                {.frequency =  816 * 1000, .index = 75},
        }
};

static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
        {.frequency = 1008 * 1000, .index = 100},
};

static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
        {.frequency = 1008 * 1000, .index = 0},
};

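/*
 * Stub: always reports 60 degrees C. A real implementation would presumably
 * read the SoC temperature sensor here.
 */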
static int get_temp(void)
{
        return 60;
}

static char sys_state;
static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
        char state;

        if (count < 1)
                return count;
        if (copy_from_user(&state, buffer, 1))
                return -EFAULT;

        sys_state = state;
        return count;
}

static const struct file_operations sys_state_fops = {
        .owner  = THIS_MODULE,
        .write  = sys_state_write,
};

static struct miscdevice sys_state_dev = {
        .fops   = &sys_state_fops,
        .name   = "sys_state",
        .minor  = MISC_DYNAMIC_MINOR,
};

static void cpufreq_temp_limit_work_func(struct work_struct *work)
{
        static bool in_perf = false;
        struct cpufreq_policy *policy;
        int temp, i;
        unsigned int new_freq = -1;
        unsigned long delay = HZ / 10; /* 100ms */
        unsigned int nr_cpus = num_online_cpus();
        const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
        size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);

        temp = get_temp();

        if (sys_state == '1') {
                in_perf = true;
                if (gpu_is_mali400) {
                        unsigned int gpu_irqs[2];

                        gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
                        msleep(40);
                        gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
                        delay = 0;
                        if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
                                limits_table = temp_limits_cpu_perf;
                                limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
                        } else {
                                limits_table = temp_limits_gpu_perf;
                                limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
                        }
                } else {
                        delay = HZ; /* 1s */
                        limits_table = temp_limits_cpu_perf;
                        limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
                }
        } else if (in_perf) {
                in_perf = false;
        } else {
                static u64 last_time_in_idle;
                static u64 last_time_in_idle_timestamp;
                u64 time_in_idle = 0, now;
                u32 delta_idle;
                u32 delta_time;
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        time_in_idle += get_cpu_idle_time_us(cpu, &now);
                }
                delta_time = now - last_time_in_idle_timestamp;
                delta_idle = time_in_idle - last_time_in_idle;
                last_time_in_idle = time_in_idle;
                last_time_in_idle_timestamp = now;
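                /*
                 * Add ~6.25% headroom, then estimate how many CPUs were
                 * busy: if the summed idle time is close to
                 * (nr_cpus - k) * wall time, roughly k CPUs were running,
                 * and the matching temp_limits[] row is selected.
                 */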
                delta_idle += delta_time >> 4; /* +6.25% */
                if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
                        limits_table = temp_limits[0];
                else if (delta_idle > (nr_cpus - 2) * delta_time)
                        limits_table = temp_limits[1];
                else if (delta_idle > (nr_cpus - 3) * delta_time)
                        limits_table = temp_limits[2];
                FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0]));
        }

        for (i = 0; i < limits_size; i++) {
                if (temp >= limits_table[i].index)
                        new_freq = limits_table[i].frequency;
        }

        if (temp_limit_freq != new_freq) {
                unsigned int cur_freq;

                temp_limit_freq = new_freq;
                cur_freq = cpufreq_get_rate(0);
                FREQ_DBG("temp limit %7u kHz cur %7u kHz\n", temp_limit_freq, cur_freq);
                if (cur_freq > temp_limit_freq) {
                        policy = cpufreq_cpu_get(0);
                        if (policy) {
                                cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
                                cpufreq_cpu_put(policy);
                        }
                }
        }

        queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);
}

static DECLARE_DELAYED_WORK(cpufreq_temp_limit_work, cpufreq_temp_limit_work_func);

static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        if (cpufreq_is_ondemand(policy)) {
                FREQ_DBG("queue work\n");
                queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
        } else {
                FREQ_DBG("cancel work\n");
                cancel_delayed_work_sync(&cpufreq_temp_limit_work);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = cpufreq_notifier_policy
};

static void cpufreq_temp_limit_init(struct cpufreq_policy *policy)
{
        unsigned int i;
        struct cpufreq_frequency_table *table;

        table = temp_limits[0];
        for (i = 0; i < sizeof(temp_limits) / sizeof(struct cpufreq_frequency_table); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        table = temp_limits_cpu_perf;
        for (i = 0; i < ARRAY_SIZE(temp_limits_cpu_perf); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        table = temp_limits_gpu_perf;
        for (i = 0; i < ARRAY_SIZE(temp_limits_gpu_perf); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        misc_register(&sys_state_dev);
        if (cpufreq_is_ondemand(policy))
                queue_delayed_work_on(0, freq_wq, &cpufreq_temp_limit_work, 0);
        cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
}

static void cpufreq_temp_limit_exit(void)
{
        cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
        if (freq_wq)
                cancel_delayed_work(&cpufreq_temp_limit_work);
}
#else
static inline void cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
static inline void cpufreq_temp_limit_exit(void) {}
#endif

static int cpufreq_verify(struct cpufreq_policy *policy)
{
        if (!freq_table)
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table);
}

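/*
 * set_rate callback registered with the dvfs layer: wraps clk_set_rate()
 * with the cpufreq PRE-/POSTCHANGE transition notifications so governors
 * and loops_per_jiffy stay in sync with rate changes made on the dvfs path.
 */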
static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
{
        unsigned int i;
        int ret;
        struct cpufreq_freqs freqs;
        struct cpufreq_policy *policy;

        freqs.new = rate / 1000;
        freqs.old = clk_get_rate(clk) / 1000;

        for_each_online_cpu(freqs.cpu) {
                policy = cpufreq_cpu_get(freqs.cpu);
                if (!policy)
                        continue;
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
                cpufreq_cpu_put(policy);
        }

        FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

        ret = clk_set_rate(clk, rate);

#ifdef CONFIG_SMP
        /*
         * Note that loops_per_jiffy is not updated on SMP systems in
         * cpufreq driver. So, update the per-CPU loops_per_jiffy value
         * on frequency transition. We need to update all dependent CPUs.
         */
        for_each_possible_cpu(i) {
                per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
        }
#endif

        freqs.new = clk_get_rate(clk) / 1000;
        /* notifiers */
        for_each_online_cpu(freqs.cpu) {
                policy = cpufreq_cpu_get(freqs.cpu);
                if (!policy)
                        continue;
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
                cpufreq_cpu_put(policy);
        }

        return ret;
}

static int cpufreq_init_cpu0(struct cpufreq_policy *policy)
{
        unsigned int i;

        gpu_is_mali400 = cpu_is_rk3188();

        clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
        if (clk_gpu_dvfs_node) {
                clk_enable_dvfs(clk_gpu_dvfs_node);
                if (gpu_is_mali400)
                        dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);
        }

        clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
        if (!clk_cpu_dvfs_node)
                return -EINVAL;

        dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
        freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
        if (freq_table == NULL) {
                freq_table = default_freq_table;
        } else {
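                /*
                 * Pick the lowest-voltage operating point whose voltage
                 * (stored in .index, in uV) still meets suspend_volt and
                 * use its frequency as the suspend frequency.
                 */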
                int v = INT_MAX;

                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                        if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
                                suspend_freq = freq_table[i].frequency;
                                v = freq_table[i].index;
                        }
                }
        }
        low_battery_freq = get_freq_from_table(low_battery_freq);
        clk_enable_dvfs(clk_cpu_dvfs_node);

        freq_wq = alloc_workqueue("cpufreq", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
        cpufreq_temp_limit_init(policy);

        pr_info("version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
        return 0;
}

static int cpufreq_init(struct cpufreq_policy *policy)
{
        static int cpu0_err;

        if (policy->cpu == 0)
                cpu0_err = cpufreq_init_cpu0(policy);

        if (cpu0_err)
                return cpu0_err;

        /* set cpuinfo min/max from the frequency table */
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        /* expose the table through the scaling_available_frequencies sysfs node */
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

        policy->cur = clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

        /* make the ondemand default sampling_rate 40000 */
        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;

        /*
         * On an SMP configuration, both processors share the voltage
         * and clock, so both CPUs need to be scaled together and hence
         * need software co-ordination. Use the cpufreq affected_cpus
         * interface to handle this scenario. The additional is_smp()
         * check is to keep SMP_ON_UP builds working.
         */
        if (is_smp())
                cpumask_setall(policy->cpus);

        return 0;
}

static int cpufreq_exit(struct cpufreq_policy *policy)
{
        if (policy->cpu != 0)
                return 0;

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_put_dvfs_node(clk_cpu_dvfs_node);
        cpufreq_temp_limit_exit();
        if (freq_wq) {
                flush_workqueue(freq_wq);
                destroy_workqueue(freq_wq);
                freq_wq = NULL;
        }

        return 0;
}

static struct freq_attr *cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

//#ifdef CONFIG_POWER_SUPPLY
#if 0
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void) { return 100; }
#endif

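/*
 * Clamp the governor-requested frequency: during the first 60 s after boot
 * a low battery caps the frequency at low_battery_freq, and (when thermal
 * limiting is enabled) the temperature ceiling is applied on top.
 */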
static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
{
        bool is_ondemand = cpufreq_is_ondemand(policy);

        if (!is_ondemand)
                return target_freq;

        if (is_booting) {
                s64 boottime_ms = ktime_to_ms(ktime_get_boottime());

                if (boottime_ms > 60 * MSEC_PER_SEC) {
                        is_booting = false;
                } else if (target_freq > low_battery_freq &&
                           rk_get_system_battery_capacity() <= low_battery_capacity) {
                        target_freq = low_battery_freq;
                }
        }

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        {
                static unsigned int ondemand_target = 816 * 1000;

                if (is_private)
                        target_freq = ondemand_target;
                else
                        ondemand_target = target_freq;
        }

        /*
         * If the new frequency is above the thermal maximum allowed
         * frequency, scale the mpu device down to the proper frequency.
         */
        target_freq = min(target_freq, temp_limit_freq);
#endif

        return target_freq;
}

static int cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
        unsigned int i, new_freq = target_freq, new_rate, cur_rate;
        int ret = 0;
        bool is_private;

        if (!freq_table) {
                FREQ_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

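        /*
         * no_cpufreq_access is a depth count: DISABLE_FURTHER_CPUFREQ
         * increments it and ENABLE_FURTHER_CPUFREQ decrements it, so scaling
         * stays blocked (e.g. across suspend, see the PM notifier below)
         * until every disable is paired with an enable.
         */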
        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access--;
        if (no_cpufreq_access) {
                FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
                ret = -EINVAL;
                goto out;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
        if (ret) {
                FREQ_ERR("no freq match for %u (ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_freq = freq_table[i].frequency;
        if (!no_cpufreq_access)
                new_freq = cpufreq_scale_limit(new_freq, policy, is_private);

        new_rate = new_freq * 1000;
        cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node);
        FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
        if (new_rate == cur_rate)
                goto out;
        ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);

out:
        FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
        mutex_unlock(&cpufreq_mutex);
        return ret;
}

static int cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (!policy)
                return ret;

        if (!cpufreq_is_ondemand(policy))
                goto out;

        switch (event) {
        case PM_SUSPEND_PREPARE:
                ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                if (ret < 0) {
                        ret = NOTIFY_BAD;
                        goto out;
                }
                ret = NOTIFY_OK;
                break;
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                ret = NOTIFY_OK;
                break;
        }
out:
        cpufreq_cpu_put(policy);
        return ret;
}

static struct notifier_block cpufreq_pm_notifier = {
        .notifier_call = cpufreq_pm_notifier_event,
};

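/*
 * On reboot, pin the CPU to the suspend frequency/voltage and block any
 * further scaling so the frequency cannot change while the system goes down.
 */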
static int cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (policy) {
                is_booting = false;
                cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                cpufreq_cpu_put(policy);
        }

        return NOTIFY_OK;
}

static struct notifier_block cpufreq_reboot_notifier = {
        .notifier_call = cpufreq_reboot_notifier_event,
};

static struct cpufreq_driver cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cpufreq_verify,
        .target = cpufreq_target,
        .get = cpufreq_get_rate,
        .init = cpufreq_init,
        .exit = cpufreq_exit,
        .name = "rockchip",
        .attr = cpufreq_attr,
};

static int __init cpufreq_driver_init(void)
{
        register_pm_notifier(&cpufreq_pm_notifier);
        register_reboot_notifier(&cpufreq_reboot_notifier);
        return cpufreq_register_driver(&cpufreq_driver);
}

device_initcall(cpufreq_driver_init);