dvfs: move dvfs.h to include/linux/rockchip/
[firefly-linux-kernel-4.4.55.git] arch/arm/mach-rockchip/rk3188-cpufreq.c
/*
 * Copyright (C) 2013 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>

#include "cpu.h"

#define VERSION "2.2"

#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)

/*
 * Frequency table index must be sequential starting at 0.
 * Here the .index field doubles as the matching CPU voltage in uV.
 */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 312 * 1000,       .index = 875 * 1000},
        {.frequency = 504 * 1000,       .index = 925 * 1000},
        {.frequency = 816 * 1000,       .index = 975 * 1000},
        {.frequency = 1008 * 1000,      .index = 1075 * 1000},
        {.frequency = 1200 * 1000,      .index = 1150 * 1000},
        {.frequency = 1416 * 1000,      .index = 1250 * 1000},
        {.frequency = 1608 * 1000,      .index = 1350 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};

static struct cpufreq_frequency_table *freq_table = default_freq_table;

/*********************************************************/

/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
static int no_cpufreq_access;
static unsigned int suspend_freq = 816 * 1000;
#if defined(CONFIG_ARCH_RK3026)
static unsigned int suspend_volt = 1100000; // 1.1V
#else
static unsigned int suspend_volt = 1000000; // 1V
#endif
static unsigned int low_battery_freq = 600 * 1000;
//static unsigned int low_battery_capacity = 5; // 5%
static bool is_booting = true;
static struct workqueue_struct *freq_wq;
static DEFINE_MUTEX(cpufreq_mutex);
static bool gpu_is_mali400;
struct dvfs_node *clk_cpu_dvfs_node = NULL;
struct dvfs_node *clk_gpu_dvfs_node = NULL;
struct dvfs_node *clk_ddr_dvfs_node = NULL;

static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate);

/*******************************************************/
static unsigned int rk3188_cpufreq_get(unsigned int cpu)
{
        if (clk_cpu_dvfs_node)
                return clk_get_rate(clk_cpu_dvfs_node->clk) / 1000;

        return 0;
}

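/*
 * Heuristic: treat any governor whose name starts with 'o', 'i', 'c' or 'h'
 * (e.g. ondemand, interactive, conservative, hotplug) as a dynamic governor
 * that should be subject to the boot/thermal frequency limiting below.
 */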
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
{
        char c = 0;

        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
}

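/*
 * Return the highest table frequency that does not exceed max_freq,
 * or max_freq itself if every table entry is above it.
 */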
static unsigned int get_freq_from_table(unsigned int max_freq)
{
        unsigned int i;
        unsigned int target_freq = 0;

        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = freq_table[i].frequency;

                if (freq <= max_freq && target_freq < freq)
                        target_freq = freq;
        }
        if (!target_freq)
                target_freq = max_freq;
        return target_freq;
}

/**********************thermal limit**************************/
//#define CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
static unsigned int temp_limit_freq = -1;       /* UINT_MAX: no limit */
module_param(temp_limit_freq, uint, 0444);

/* Per-load throttle tables: .frequency is the cap, .index the trip temperature in degrees C */
static struct cpufreq_frequency_table temp_limits[4][4] = {
        {       // 1 CPU busy
                {.frequency =          -1, .index = 50},
                {.frequency =          -1, .index = 55},
                {.frequency =          -1, .index = 60},
                {.frequency = 1608 * 1000, .index = 75},
        }, {    // 2 CPUs busy
                {.frequency = 1800 * 1000, .index = 50},
                {.frequency = 1608 * 1000, .index = 55},
                {.frequency = 1416 * 1000, .index = 60},
                {.frequency = 1200 * 1000, .index = 75},
        }, {    // 3 CPUs busy
                {.frequency = 1608 * 1000, .index = 50},
                {.frequency = 1416 * 1000, .index = 55},
                {.frequency = 1200 * 1000, .index = 60},
                {.frequency = 1008 * 1000, .index = 75},
        }, {    // 4 CPUs busy
                {.frequency = 1416 * 1000, .index = 50},
                {.frequency = 1200 * 1000, .index = 55},
                {.frequency = 1008 * 1000, .index = 60},
                {.frequency =  816 * 1000, .index = 75},
        }
};

static struct cpufreq_frequency_table temp_limits_cpu_perf[] = {
        {.frequency = 1008 * 1000, .index = 100},
};

static struct cpufreq_frequency_table temp_limits_gpu_perf[] = {
        {.frequency = 1008 * 1000, .index = 0},
};

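/* Stub: no thermal sensor is wired up here, so report a fixed 60 degrees C. */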
static int rk3188_get_temp(void)
{
        return 60;
}

static char sys_state;

static ssize_t sys_state_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
        char state;

        if (count < 1)
                return count;
        if (copy_from_user(&state, buffer, 1))
                return -EFAULT;

        sys_state = state;
        return count;
}

static const struct file_operations sys_state_fops = {
        .owner  = THIS_MODULE,
        .write  = sys_state_write,
};

static struct miscdevice sys_state_dev = {
        .fops   = &sys_state_fops,
        .name   = "sys_state",
        .minor  = MISC_DYNAMIC_MINOR,
};

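/*
 * Periodic thermal-throttle worker. It picks a limit table based on the
 * userspace hint written to /dev/sys_state ('1' appears to mean a
 * performance scenario), on GPU interrupt activity for Mali400 parts, or
 * on an idle-time estimate of how many CPUs are busy; it then caps the CPU
 * frequency whenever the (stubbed) temperature crosses a trip point and
 * kicks the governor so the new limit takes effect.
 */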
static void rk3188_cpufreq_temp_limit_work_func(struct work_struct *work)
{
        static bool in_perf = false;
        struct cpufreq_policy *policy;
        int temp, i;
        unsigned int new_freq = -1;
        unsigned long delay = HZ / 10; // 100ms
        unsigned int nr_cpus = num_online_cpus();
        const struct cpufreq_frequency_table *limits_table = temp_limits[nr_cpus - 1];
        size_t limits_size = ARRAY_SIZE(temp_limits[nr_cpus - 1]);

        temp = rk3188_get_temp();

        if (sys_state == '1') {
                in_perf = true;
                if (gpu_is_mali400) {
                        unsigned int gpu_irqs[2];

                        gpu_irqs[0] = kstat_irqs(IRQ_GPU_GP);
                        msleep(40);
                        gpu_irqs[1] = kstat_irqs(IRQ_GPU_GP);
                        delay = 0;
                        if ((gpu_irqs[1] - gpu_irqs[0]) < 8) {
                                limits_table = temp_limits_cpu_perf;
                                limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
                        } else {
                                limits_table = temp_limits_gpu_perf;
                                limits_size = ARRAY_SIZE(temp_limits_gpu_perf);
                        }
                } else {
                        delay = HZ; // 1s
                        limits_table = temp_limits_cpu_perf;
                        limits_size = ARRAY_SIZE(temp_limits_cpu_perf);
                }
        } else if (in_perf) {
                in_perf = false;
        } else {
                static u64 last_time_in_idle = 0;
                static u64 last_time_in_idle_timestamp = 0;
                u64 time_in_idle = 0, now;
                u32 delta_idle;
                u32 delta_time;
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        time_in_idle += get_cpu_idle_time_us(cpu, &now);
                }
                delta_time = now - last_time_in_idle_timestamp;
                delta_idle = time_in_idle - last_time_in_idle;
                last_time_in_idle = time_in_idle;
                last_time_in_idle_timestamp = now;
                delta_idle += delta_time >> 4; // +6.25%
                if (delta_idle > (nr_cpus - 1) * delta_time && delta_idle < (nr_cpus + 1) * delta_time)
                        limits_table = temp_limits[0];
                else if (delta_idle > (nr_cpus - 2) * delta_time)
                        limits_table = temp_limits[1];
                else if (delta_idle > (nr_cpus - 3) * delta_time)
                        limits_table = temp_limits[2];
                FREQ_DBG("delta time %6u us idle %6u us %u cpus select table %d\n", delta_time, delta_idle, nr_cpus, (limits_table - temp_limits[0]) / ARRAY_SIZE(temp_limits[0]));
        }

        for (i = 0; i < limits_size; i++) {
                if (temp >= limits_table[i].index)
                        new_freq = limits_table[i].frequency;
        }

        if (temp_limit_freq != new_freq) {
                unsigned int cur_freq;

                temp_limit_freq = new_freq;
                cur_freq = rk3188_cpufreq_get(0);
                FREQ_DBG("temp limit %7d KHz cur %7d KHz\n", temp_limit_freq, cur_freq);
                if (cur_freq > temp_limit_freq) {
                        policy = cpufreq_cpu_get(0);
                        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L | CPUFREQ_PRIVATE);
                        cpufreq_cpu_put(policy);
                }
        }

        queue_delayed_work_on(0, freq_wq, to_delayed_work(work), delay);
}

static DECLARE_DELAYED_WORK(rk3188_cpufreq_temp_limit_work, rk3188_cpufreq_temp_limit_work_func);

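/*
 * Policy notifier: start the throttle worker whenever a dynamic governor
 * takes over, and cancel it again for static governors.
 */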
static int rk3188_cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_NOTIFY)
                return 0;

        if (cpufreq_is_ondemand(policy)) {
                FREQ_DBG("queue work\n");
                queue_delayed_work_on(0, freq_wq, &rk3188_cpufreq_temp_limit_work, 0);
        } else {
                FREQ_DBG("cancel work\n");
                cancel_delayed_work_sync(&rk3188_cpufreq_temp_limit_work);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = rk3188_cpufreq_notifier_policy
};

static void rk3188_cpufreq_temp_limit_init(struct cpufreq_policy *policy)
{
        unsigned int i;
        struct cpufreq_frequency_table *table;

        /* temp_limits[4][4] is contiguous, so walk all 16 entries through temp_limits[0] */
        table = temp_limits[0];
        for (i = 0; i < sizeof(temp_limits) / sizeof(struct cpufreq_frequency_table); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        table = temp_limits_cpu_perf;
        for (i = 0; i < ARRAY_SIZE(temp_limits_cpu_perf); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        table = temp_limits_gpu_perf;
        for (i = 0; i < ARRAY_SIZE(temp_limits_gpu_perf); i++)
                table[i].frequency = get_freq_from_table(table[i].frequency);
        misc_register(&sys_state_dev);
        if (cpufreq_is_ondemand(policy))
                queue_delayed_work_on(0, freq_wq, &rk3188_cpufreq_temp_limit_work, 0);
        cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
}

static void rk3188_cpufreq_temp_limit_exit(void)
{
        cpufreq_unregister_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);
        if (freq_wq)
                cancel_delayed_work(&rk3188_cpufreq_temp_limit_work);
}
#else
static inline void rk3188_cpufreq_temp_limit_init(struct cpufreq_policy *policy) {}
static inline void rk3188_cpufreq_temp_limit_exit(void) {}
#endif

/************************************ dvfs test ************************************/
//#define CPU_FREQ_DVFS_TST
#ifdef CPU_FREQ_DVFS_TST
static unsigned int freq_dvfs_tst_rate;
static int test_count;
#define TEST_FRE_NUM 11
static int test_tlb_rate[TEST_FRE_NUM] = { 504, 1008, 504, 1200, 252, 816, 1416, 252, 1512, 252, 816 };
//static int test_tlb_rate[TEST_FRE_NUM]={504,1008,504,1200,252,816,1416,126,1512,126,816};

#define TEST_GPU_NUM 3

static int test_tlb_gpu[TEST_GPU_NUM] = { 360, 400, 180 };
static int test_tlb_ddr[TEST_GPU_NUM] = { 401, 200, 500 };

static int gpu_ddr = 0;

static void rk3188_cpufreq_dvsf_tst_work_func(struct work_struct *work)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        gpu_ddr++;
#if 0
        FREQ_LOG("cpufreq_dvsf_tst,ddr%u,gpu%u\n",
                test_tlb_ddr[gpu_ddr % TEST_GPU_NUM],
                test_tlb_gpu[gpu_ddr % TEST_GPU_NUM]);
        clk_set_rate(ddr_clk, test_tlb_ddr[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
        clk_set_rate(gpu_clk, test_tlb_gpu[gpu_ddr % TEST_GPU_NUM] * 1000 * 1000);
#endif

        test_count++;
        freq_dvfs_tst_rate = test_tlb_rate[test_count % TEST_FRE_NUM] * 1000;
        printk("cpufreq_dvsf_tst,cpu set rate %d\n", freq_dvfs_tst_rate);
        cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_L);
        cpufreq_cpu_put(policy);

        queue_delayed_work_on(0, freq_wq, to_delayed_work(work), msecs_to_jiffies(1000));
}

static DECLARE_DELAYED_WORK(rk3188_cpufreq_dvsf_tst_work, rk3188_cpufreq_dvsf_tst_work_func);
#endif /* CPU_FREQ_DVFS_TST */

/***********************************************************************/
static int rk3188_cpufreq_verify(struct cpufreq_policy *policy)
{
        if (!freq_table)
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table);
}

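/*
 * One-time setup on CPU0: look up the cpu/gpu/ddr DVFS nodes, hook the DVFS
 * set_rate callback, pull the frequency/voltage table from the DVFS core
 * (falling back to default_freq_table), derive the suspend frequency from
 * suspend_volt, and start the thermal-limit machinery.
 */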
static int rk3188_cpufreq_init_cpu0(struct cpufreq_policy *policy)
{
        unsigned int i;
        //struct cpufreq_frequency_table *table_adjust;

        gpu_is_mali400 = cpu_is_rk3188();

        clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
        if (!clk_gpu_dvfs_node)
                return -EINVAL;

        clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
        if (!clk_ddr_dvfs_node)
                return -EINVAL;

        clk_cpu_dvfs_node = clk_get_dvfs_node("clk_core");
        if (!clk_cpu_dvfs_node)
                return -EINVAL;

        //table_adjust = dvfs_get_freq_volt_table(cpu_clk);
        //dvfs_adjust_table_lmtvolt(cpu_clk, table_adjust);
        //table_adjust = dvfs_get_freq_volt_table(gpu_clk);
        //dvfs_adjust_table_lmtvolt(gpu_clk, table_adjust);

        clk_enable_dvfs(clk_gpu_dvfs_node);
        if (gpu_is_mali400)
                dvfs_clk_enable_limit(clk_gpu_dvfs_node, 133000000, 600000000);

        clk_enable_dvfs(clk_ddr_dvfs_node);

        dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node, cpufreq_scale_rate_for_dvfs);
        freq_table = dvfs_get_freq_volt_table(clk_cpu_dvfs_node);
        if (freq_table == NULL) {
                freq_table = default_freq_table;
        } else {
                int v = INT_MAX;

                /* suspend at the entry with the lowest voltage that is still >= suspend_volt */
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                        if (freq_table[i].index >= suspend_volt && v > freq_table[i].index) {
                                suspend_freq = freq_table[i].frequency;
                                v = freq_table[i].index;
                        }
                }
        }
        low_battery_freq = get_freq_from_table(low_battery_freq);
        clk_enable_dvfs(clk_cpu_dvfs_node);
        /*if(rk_tflag()){
#define RK3188_T_LIMIT_FREQ     (1416 * 1000)
                dvfs_clk_enable_limit(cpu_clk, 0, RK3188_T_LIMIT_FREQ * 1000);
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                        if (freq_table[i].frequency > RK3188_T_LIMIT_FREQ) {
                                printk("cpufreq: delete arm freq(%u)\n", freq_table[i].frequency);
                                freq_table[i].frequency = CPUFREQ_TABLE_END;
                        }
                }
        }*/
        freq_wq = alloc_workqueue("rk3188_cpufreqd", WQ_NON_REENTRANT | WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
        rk3188_cpufreq_temp_limit_init(policy);
#ifdef CPU_FREQ_DVFS_TST
        queue_delayed_work(freq_wq, &rk3188_cpufreq_dvsf_tst_work, msecs_to_jiffies(20 * 1000));
#endif

        printk("rk3188 cpufreq version " VERSION ", suspend freq %d MHz\n", suspend_freq / 1000);
        return 0;
}

static int rk3188_cpufreq_init(struct cpufreq_policy *policy)
{
        static int cpu0_err;

        if (policy->cpu == 0)
                cpu0_err = rk3188_cpufreq_init_cpu0(policy);

        if (cpu0_err)
                return cpu0_err;

        /* set cpuinfo min/max frequency from the table */
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        /* expose the table through the sysfs node */
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

        policy->cur = rk3188_cpufreq_get(0);

        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;        // makes ondemand's default sampling_rate 40000

        /*
         * On SMP configurations both processors share the voltage and
         * clock, so the CPUs need to be scaled together and hence need
         * software coordination. Use the cpufreq affected_cpus interface
         * to handle this scenario. The additional is_smp() check keeps
         * SMP_ON_UP builds working.
         */
        if (is_smp())
                cpumask_setall(policy->cpus);

        return 0;
}

static int rk3188_cpufreq_exit(struct cpufreq_policy *policy)
{
        if (policy->cpu != 0)
                return 0;

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
//      clk_put(clk_cpu_dvfs_node->clk);
        rk3188_cpufreq_temp_limit_exit();
        if (freq_wq) {
                flush_workqueue(freq_wq);
                destroy_workqueue(freq_wq);
                freq_wq = NULL;
        }

        return 0;
}

static struct freq_attr *rk3188_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

#ifdef CONFIG_POWER_SUPPLY
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void) { return 100; }
#endif

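/*
 * Clamp a governor-requested frequency. Handles the one-shot DVFS test
 * rate, clears is_booting once 60s of boot time have elapsed (the
 * low-battery cap that used this window is commented out), and, when
 * thermal limiting is enabled, remembers the last external ondemand
 * target (is_private marks internal requests from the temperature
 * worker) and caps the result at temp_limit_freq.
 */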
static unsigned int cpufreq_scale_limit(unsigned int target_freq, struct cpufreq_policy *policy, bool is_private)
{
        bool is_ondemand = cpufreq_is_ondemand(policy);

#ifdef CPU_FREQ_DVFS_TST
        if (freq_dvfs_tst_rate) {
                target_freq = freq_dvfs_tst_rate;
                freq_dvfs_tst_rate = 0;
                return target_freq;
        }
#endif

        if (!is_ondemand)
                return target_freq;

        if (is_booting) {
                s64 boottime_ms = ktime_to_ms(ktime_get_boottime());

                if (boottime_ms > 60 * MSEC_PER_SEC) {
                        is_booting = false;
                }/* else if (target_freq > low_battery_freq &&
                           rk_get_system_battery_capacity() <= low_battery_capacity) {
                        target_freq = low_battery_freq;
                }*/
        }

#ifdef CONFIG_RK30_CPU_FREQ_LIMIT_BY_TEMP
        {
                static unsigned int ondemand_target = 816 * 1000;

                if (is_private)
                        target_freq = ondemand_target;
                else
                        ondemand_target = target_freq;
        }

        /*
         * If the new frequency is above the thermal maximum allowed
         * frequency, scale the mpu device down to the proper frequency.
         */
        target_freq = min(target_freq, temp_limit_freq);
#endif

        return target_freq;
}

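/*
 * set_rate callback registered with the DVFS core: performs the actual
 * clk_set_rate() while wrapping it in cpufreq PRE/POSTCHANGE notifications
 * and refreshing every CPU's loops_per_jiffy afterwards.
 */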
static int cpufreq_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
{
#ifdef CONFIG_SMP
        unsigned int i;
#endif
        int ret;
        struct cpufreq_freqs freqs;
        struct cpufreq_policy *policy;

        freqs.new = rate / 1000;
        freqs.old = clk_get_rate(clk) / 1000;

        for_each_online_cpu(freqs.cpu) {
                policy = cpufreq_cpu_get(freqs.cpu);
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
                cpufreq_cpu_put(policy);
        }

        FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

        ret = clk_set_rate(clk, rate);

#ifdef CONFIG_SMP
        /*
         * Note that loops_per_jiffy is not updated on SMP systems in the
         * cpufreq driver core, so update the per-CPU loops_per_jiffy value
         * on frequency transition for all dependent CPUs.
         */
        for_each_possible_cpu(i) {
                per_cpu(cpu_data, i).loops_per_jiffy = loops_per_jiffy;
        }
#endif

        freqs.new = clk_get_rate(clk) / 1000;
        /* notifiers */
        for_each_online_cpu(freqs.cpu) {
                policy = cpufreq_cpu_get(freqs.cpu);
                cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
                cpufreq_cpu_put(policy);
        }

        return ret;
}

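/*
 * Governor entry point. The upper "relation" bits implement a PM interlock:
 * DISABLE_FURTHER_CPUFREQ pins the frequency (e.g. across suspend) by
 * incrementing no_cpufreq_access, and ENABLE_FURTHER_CPUFREQ releases it.
 */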
static int rk3188_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation)
{
        unsigned int i, new_freq = target_freq, new_rate, cur_rate;
        int ret = 0;
        bool is_private;

        if (!freq_table) {
                FREQ_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access--;
        if (no_cpufreq_access) {
                FREQ_LOG("denied access to %s as it is disabled temporarily\n", __func__);
                ret = -EINVAL;
                goto out;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &i);
        if (ret) {
                FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_freq = freq_table[i].frequency;
        if (!no_cpufreq_access)
                new_freq = cpufreq_scale_limit(new_freq, policy, is_private);

        new_rate = new_freq * 1000;
        cur_rate = clk_get_rate(clk_cpu_dvfs_node->clk);
        FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq, new_freq, cur_rate / 1000);
        if (new_rate == cur_rate)
                goto out;
        ret = dvfs_clk_set_rate(clk_cpu_dvfs_node, new_rate);

out:
        FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
        mutex_unlock(&cpufreq_mutex);
        return ret;
}

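/*
 * PM notifier: before suspend, force the CPU to suspend_freq and lock out
 * further cpufreq requests; after resume/restore, release the lock again.
 */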
static int rk3188_cpufreq_pm_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (!policy)
                return ret;

        if (!cpufreq_is_ondemand(policy))
                goto out;

        switch (event) {
        case PM_SUSPEND_PREPARE:
                ret = cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                if (ret < 0) {
                        ret = NOTIFY_BAD;
                        goto out;
                }
                ret = NOTIFY_OK;
                break;
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                cpufreq_driver_target(policy, suspend_freq, ENABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                ret = NOTIFY_OK;
                break;
        }
out:
        cpufreq_cpu_put(policy);
        return ret;
}

static struct notifier_block rk3188_cpufreq_pm_notifier = {
        .notifier_call = rk3188_cpufreq_pm_notifier_event,
};

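/*
 * Reboot notifier: pin the CPU to suspend_freq and block further scaling so
 * the system reboots from a known-safe frequency/voltage pair.
 */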
static int rk3188_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0);

        if (policy) {
                is_booting = false;
                cpufreq_driver_target(policy, suspend_freq, DISABLE_FURTHER_CPUFREQ | CPUFREQ_RELATION_H);
                cpufreq_cpu_put(policy);
        }

        return NOTIFY_OK;
}

static struct notifier_block rk3188_cpufreq_reboot_notifier = {
        .notifier_call = rk3188_cpufreq_reboot_notifier_event,
};

static struct cpufreq_driver rk3188_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = rk3188_cpufreq_verify,
        .target = rk3188_cpufreq_target,
        .get = rk3188_cpufreq_get,
        .init = rk3188_cpufreq_init,
        .exit = rk3188_cpufreq_exit,
        .name = "rk3188",
        .attr = rk3188_cpufreq_attr,
};

static int __init rk3188_cpufreq_driver_init(void)
{
        if (!cpu_is_rk3188())
                return 0;
        register_pm_notifier(&rk3188_cpufreq_pm_notifier);
        register_reboot_notifier(&rk3188_cpufreq_reboot_notifier);
        return cpufreq_register_driver(&rk3188_cpufreq_driver);
}

device_initcall(rk3188_cpufreq_driver_init);