41ac370f706f01c244437dbc001bea3dedab3242
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / rockchip_big_little.c
1 /*
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14 #define pr_fmt(fmt) "cpufreq: " fmt
15 #include <linux/clk.h>
16 #include <linux/cpufreq.h>
17 #include <linux/err.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/init.h>
20 #include <linux/reboot.h>
21 #include <linux/suspend.h>
22 #include <linux/tick.h>
23 #include <linux/workqueue.h>
24 #include <linux/delay.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/fs.h>
27 #include <linux/miscdevice.h>
28 #include <linux/string.h>
29 #include <linux/rockchip/cpu.h>
30 #include <linux/rockchip/dvfs.h>
31 #include <asm/smp_plat.h>
32 #include <asm/unistd.h>
33 #include <linux/uaccess.h>
34 #include <asm/system_misc.h>
35 #include <linux/cpu.h>
36 #include <linux/of.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/regmap.h>
39 #include <linux/rockchip/common.h>
40 #include <dt-bindings/clock/rk_system_status.h>
41 #include <linux/platform_device.h>
42 #include <linux/module.h>
43 #include "../../../drivers/clk/rockchip/clk-pd.h"
44
/* GRF CPU control register n (4-byte stride from 0x500).  The argument
 * is parenthesized so expansions like CON(x + 1) compute the intended
 * offset.
 */
#define RK3368_GRF_CPU_CON(n) (0x500 + 4 * (n))
46
47
48 #define VERSION "1.0"
49 #define RK_MAX_CLUSTERS 2
50
51 #ifdef DEBUG
52 #define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
53 #define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
54 #else
55 #define FREQ_DBG(fmt, args...) do {} while (0)
56 #define FREQ_LOG(fmt, args...) do {} while (0)
57 #endif
58 #define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
59
/* Frequency table index must be sequential starting at 0 */
/* Fallback OPP table used when dvfs_get_freq_volt_table() returns no
 * table: .frequency is in kHz; .index holds the OPP voltage in uV
 * (it is compared against suspend_volt in cluster_cpus_freq_dvfs_init).
 */
static struct cpufreq_frequency_table default_freq_table[] = {
	{.frequency = 312 * 1000,	.index = 875 * 1000},
	{.frequency = 504 * 1000,	.index = 925 * 1000},
	{.frequency = 816 * 1000,	.index = 975 * 1000},
	{.frequency = 1008 * 1000,	.index = 1075 * 1000},
	{.frequency = 1200 * 1000,	.index = 1150 * 1000},
	{.frequency = 1416 * 1000,	.index = 1250 * 1000},
	{.frequency = 1608 * 1000,	.index = 1350 * 1000},
	{.frequency = CPUFREQ_TABLE_END},
};
/* Per-cluster frequency tables; the extra slot stays NULL as a sentinel. */
static struct cpufreq_frequency_table *freq_table[RK_MAX_CLUSTERS + 1];
/*********************************************************/
/* Additional semantics for "relation" in cpufreq with pm: these flag
 * bits are carried in the "relation" argument of cpufreq_driver_target()
 * and stripped again inside cpufreq_bl_target().
 */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
/* Per-cluster depth counter; while non-zero, frequency changes are refused. */
static unsigned int no_cpufreq_access[RK_MAX_CLUSTERS] = {0};
/* Frequencies (kHz) pinned during suspend/reboot; refined at init from
 * the voltage table (see cluster_cpus_freq_dvfs_init).
 */
static unsigned int suspend_freq[RK_MAX_CLUSTERS] = {816 * 1000, 816 * 1000};
static unsigned int suspend_volt = 1100000; /* uV */
/* Frequency cap (kHz) applied while booting with a low battery. */
static unsigned int low_battery_freq[RK_MAX_CLUSTERS] = {600 * 1000,
	600 * 1000};
static unsigned int low_battery_capacity = 5; /* percent */
static bool is_booting = true;
/* Serializes all target() calls across both clusters. */
static DEFINE_MUTEX(cpufreq_mutex);
static struct dvfs_node *clk_cpu_dvfs_node[RK_MAX_CLUSTERS];
static struct dvfs_node *clk_gpu_dvfs_node;
static struct dvfs_node *clk_ddr_dvfs_node;
/* CPU number owning each cluster's policy, for cpufreq_cpu_get(). */
static u32 cluster_policy_cpu[RK_MAX_CLUSTERS];
/* Which cluster the system booted from, as read from the GRF in probe. */
static unsigned int big_little = 1;
92
93 /*******************************************************/
/* Map a logical CPU number to its cluster (physical package) id. */
static int cpu_to_cluster(int cpu)
{
	int cluster_id = topology_physical_package_id(cpu);

	return cluster_id;
}
98
99 static unsigned int cpufreq_bl_get_rate(unsigned int cpu)
100 {
101         u32 cur_cluster = cpu_to_cluster(cpu);
102
103         if (clk_cpu_dvfs_node[cur_cluster])
104                 return clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
105
106         return 0;
107 }
108
109 static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
110 {
111         char c = 0;
112
113         if (policy && policy->governor)
114                 c = policy->governor->name[0];
115         return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
116 }
117
118 static unsigned int get_freq_from_table(unsigned int max_freq,
119                                         unsigned int cluster)
120 {
121         unsigned int i;
122         unsigned int target_freq = 0;
123
124         for (i = 0; freq_table[cluster][i].frequency !=
125                 CPUFREQ_TABLE_END; i++) {
126                 unsigned int freq = freq_table[cluster][i].frequency;
127
128                 if (freq <= max_freq && target_freq < freq)
129                         target_freq = freq;
130         }
131         if (!target_freq)
132                 target_freq = max_freq;
133         return target_freq;
134 }
135
/* CPUFREQ_ADJUST policy notifier.  When an ondemand-style governor is
 * (re)selected, re-apply the dvfs rate limits captured earlier; when a
 * different governor takes over, snapshot the current limits into the
 * function statics so they can be restored on the next switch back.
 */
static int cpufreq_notifier_policy(struct notifier_block *nb, unsigned long val,
				   void *data)
{
	/* persists across calls: limits captured while not ondemand;
	 * max_rate = -1 wraps to UINT_MAX, i.e. "no upper limit"
	 */
	static unsigned int min_rate = 0, max_rate = -1;
	struct cpufreq_policy *policy = data;
	u32 cur_cluster = cpu_to_cluster(policy->cpu);

	if (val != CPUFREQ_ADJUST)
		return 0;

	if (cpufreq_is_ondemand(policy)) {
		FREQ_DBG("queue work\n");
		dvfs_clk_enable_limit(clk_cpu_dvfs_node[cur_cluster],
				      min_rate, max_rate);
	} else {
		FREQ_DBG("cancel work\n");
		dvfs_clk_get_limit(clk_cpu_dvfs_node[cur_cluster],
				   &min_rate, &max_rate);
	}

	return 0;
}
158
/* Registered in cpufreq_bl_init_cpu0(), removed in cpufreq_bl_exit(). */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_notifier_policy
};
162
163 static int cpufreq_bl_verify(struct cpufreq_policy *policy)
164 {
165         u32 cur_cluster = cpu_to_cluster(policy->cpu);
166
167         if (!freq_table[cur_cluster])
168                 return -EINVAL;
169         return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
170 }
171
172 static int clk_node_get_cluster_id(struct clk *clk)
173 {
174         int i;
175
176         for (i = 0; i < RK_MAX_CLUSTERS; i++) {
177                 if (clk_cpu_dvfs_node[i]->clk == clk)
178                         return i;
179         }
180         return 0;
181 }
182
183 static int cpufreq_bl_scale_rate_for_dvfs(struct clk *clk, unsigned long rate)
184 {
185         int ret;
186         struct cpufreq_freqs freqs;
187         struct cpufreq_policy *policy;
188         u32 cur_cluster;
189
190         cur_cluster = clk_node_get_cluster_id(clk);
191         policy = cpufreq_cpu_get(cluster_policy_cpu[cur_cluster]);
192
193         freqs.new = rate / 1000;
194         freqs.old = clk_get_rate(clk) / 1000;
195
196         cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
197
198         FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
199
200         ret = clk_set_rate(clk, rate);
201
202         freqs.new = clk_get_rate(clk) / 1000;
203         /* notifiers */
204         cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
205
206         cpufreq_cpu_put(policy);
207         return ret;
208 }
209
210 static int cluster_cpus_freq_dvfs_init(u32 cluster_id, char *dvfs_name)
211 {
212         clk_cpu_dvfs_node[cluster_id] = clk_get_dvfs_node(dvfs_name);
213
214         if (!clk_cpu_dvfs_node[cluster_id]) {
215                 FREQ_ERR("%s:cluster_id=%d,get dvfs err\n",
216                          __func__, cluster_id);
217                 return -EINVAL;
218         }
219         dvfs_clk_register_set_rate_callback(clk_cpu_dvfs_node[cluster_id],
220                                             cpufreq_bl_scale_rate_for_dvfs);
221         freq_table[cluster_id] =
222                 dvfs_get_freq_volt_table(clk_cpu_dvfs_node[cluster_id]);
223         if (freq_table[cluster_id] == NULL) {
224                 freq_table[cluster_id] = default_freq_table;
225         } else {
226                 int v = INT_MAX;
227                 int i;
228
229                 for (i = 0; freq_table[cluster_id][i].frequency !=
230                         CPUFREQ_TABLE_END; i++) {
231                         if (freq_table[cluster_id][i].index >= suspend_volt &&
232                             v > freq_table[cluster_id][i].index) {
233                                 suspend_freq[cluster_id] =
234                                         freq_table[cluster_id][i].frequency;
235                                 v = freq_table[cluster_id][i].index;
236                                 }
237                         }
238                 }
239         low_battery_freq[cluster_id] =
240                 get_freq_from_table(low_battery_freq[cluster_id], cluster_id);
241         clk_enable_dvfs(clk_cpu_dvfs_node[cluster_id]);
242         return 0;
243 }
244
245 static int cpufreq_bl_init_cpu0(struct cpufreq_policy *policy)
246 {
247         clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
248         if (clk_gpu_dvfs_node)
249                 clk_enable_dvfs(clk_gpu_dvfs_node);
250
251         clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
252         if (clk_ddr_dvfs_node)
253                 clk_enable_dvfs(clk_ddr_dvfs_node);
254
255         cluster_cpus_freq_dvfs_init(0, "clk_core_b");
256         cluster_cpus_freq_dvfs_init(1, "clk_core_l");
257
258         cpufreq_register_notifier(&notifier_policy_block,
259                                   CPUFREQ_POLICY_NOTIFIER);
260
261         pr_info("cpufreq version " VERSION ", suspend freq %d %d MHz\n",
262                 suspend_freq[0] / 1000, suspend_freq[1] / 1000);
263         return 0;
264 }
265
/* cpufreq .init callback, invoked once per policy.
 *
 * The cpu0 call performs the driver-wide setup via
 * cpufreq_bl_init_cpu0(); its result is cached in a function-static so
 * that every later cluster init fails the same way if cpu0 setup failed.
 */
static int cpufreq_bl_init(struct cpufreq_policy *policy)
{
	static int cpu0_err;
	u32 cur_cluster = cpu_to_cluster(policy->cpu);

	if (policy->cpu == 0)
		cpu0_err = cpufreq_bl_init_cpu0(policy);
	if (cpu0_err)
		return cpu0_err;

	/* remember which cpu owns this cluster's policy for later
	 * cpufreq_cpu_get() lookups
	 */
	cluster_policy_cpu[cur_cluster] = policy->cpu;

	/* set freq min max */
	cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
	/* sys nod */
	cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);

	if (cur_cluster < RK_MAX_CLUSTERS) {
		/* all CPUs in this cluster share one policy */
		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
	}

	policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;

	/* make ondemand default sampling_rate to 40000 */
	policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;

	return 0;
}
299
300 static int cpufreq_bl_exit(struct cpufreq_policy *policy)
301 {
302         u32 cur_cluster = cpu_to_cluster(policy->cpu);
303
304         if (policy->cpu == 0) {
305                 cpufreq_unregister_notifier(&notifier_policy_block,
306                                             CPUFREQ_POLICY_NOTIFIER);
307         }
308         cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
309         clk_put_dvfs_node(clk_cpu_dvfs_node[cur_cluster]);
310
311         return 0;
312 }
313
/* sysfs attributes exported for each policy. */
static struct freq_attr *cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
318
#ifdef CONFIG_CHARGER_DISPLAY
extern int rk_get_system_battery_capacity(void);
#else
/* no charger driver configured: report a full battery so the
 * low-battery boot cap never triggers
 */
static int rk_get_system_battery_capacity(void) { return 100; }
#endif
324
325 static unsigned int cpufreq_bl_scale_limit(unsigned int target_freq,
326                                            struct cpufreq_policy *policy,
327                                            bool is_private)
328 {
329         bool is_ondemand = cpufreq_is_ondemand(policy);
330         u32 cur_cluster = cpu_to_cluster(policy->cpu);
331
332         if (!is_ondemand)
333                 return target_freq;
334
335         if (is_booting) {
336                 s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
337
338                 if (boottime_ms > 60 * MSEC_PER_SEC) {
339                         is_booting = false;
340                 } else if (target_freq > low_battery_freq[cur_cluster] &&
341                            rk_get_system_battery_capacity() <=
342                            low_battery_capacity) {
343                         target_freq = low_battery_freq[cur_cluster];
344                 }
345         }
346
347         return target_freq;
348 }
349
/* cpufreq .target callback.
 *
 * "relation" smuggles extra flag bits (see MASK_FURTHER_CPUFREQ):
 * DISABLE_FURTHER_CPUFREQ increments a per-cluster lock count that
 * refuses subsequent requests until a matching ENABLE_FURTHER_CPUFREQ
 * decrements it (used across suspend/resume); CPUFREQ_PRIVATE marks
 * driver-internal requests.  All flags are stripped before the real
 * relation reaches the frequency-table lookup.
 */
static int cpufreq_bl_target(struct cpufreq_policy *policy,
			     unsigned int target_freq, unsigned int relation)
{
	unsigned int i, new_freq = target_freq, new_rate, cur_rate;
	int ret = 0;
	bool is_private;
	u32 cur_cluster = cpu_to_cluster(policy->cpu);

	if (!freq_table[cur_cluster]) {
		FREQ_ERR("no freq table!\n");
		return -EINVAL;
	}

	mutex_lock(&cpufreq_mutex);

	is_private = relation & CPUFREQ_PRIVATE;
	relation &= ~CPUFREQ_PRIVATE;

	/* an ENABLE flag releases one level of the per-cluster block */
	if ((relation & ENABLE_FURTHER_CPUFREQ) &&
	    no_cpufreq_access[cur_cluster])
		no_cpufreq_access[cur_cluster]--;
	if (no_cpufreq_access[cur_cluster]) {
		FREQ_LOG("denied access to %s as it is disabled temporarily\n",
			 __func__);
		ret = -EINVAL;
		goto out;
	}
	if (relation & DISABLE_FURTHER_CPUFREQ)
		no_cpufreq_access[cur_cluster]++;
	relation &= ~MASK_FURTHER_CPUFREQ;

	ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
					     target_freq, relation, &i);
	if (ret) {
		FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
		goto out;
	}
	new_freq = freq_table[cur_cluster][i].frequency;
	if (!no_cpufreq_access[cur_cluster])
		/* may cap the request during low-battery boot */
		new_freq = cpufreq_bl_scale_limit(new_freq, policy, is_private);

	new_rate = new_freq * 1000; /* kHz -> Hz */
	cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[cur_cluster]);
	FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq,
		 new_freq, cur_rate / 1000);
	if (new_rate == cur_rate)
		goto out;
	/* dvfs layer adjusts voltage and calls our set-rate callback */
	ret = dvfs_clk_set_rate(clk_cpu_dvfs_node[cur_cluster], new_rate);

out:
	FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
	mutex_unlock(&cpufreq_mutex);
	return ret;
}
404
/* PM notifier: on suspend, force each cluster to its suspend frequency
 * and lock out further changes (DISABLE_FURTHER_CPUFREQ); on resume,
 * release the lock.  Only applies under ondemand-style governors.
 */
static int cpufreq_pm_notifier_event(struct notifier_block *this,
				     unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;
	int i;

	for (i = 0; i < RK_MAX_CLUSTERS; i++) {
		struct cpufreq_policy *policy =
			cpufreq_cpu_get(cluster_policy_cpu[i]);

		if (!policy)
			return ret;

		if (!cpufreq_is_ondemand(policy))
			goto out;	/* put this policy, next cluster */

		switch (event) {
		case PM_SUSPEND_PREPARE:
			/* nudge cur so cpufreq_driver_target() does not
			 * early-return on target == cur and skip the
			 * DISABLE flag handling in our target callback
			 */
			policy->cur++;
			ret = cpufreq_driver_target(policy, suspend_freq[i],
						    DISABLE_FURTHER_CPUFREQ |
						    CPUFREQ_RELATION_H);
			if (ret < 0) {
				ret = NOTIFY_BAD;
				goto out;
			}
			ret = NOTIFY_OK;
			break;
		case PM_POST_RESTORE:
		case PM_POST_SUSPEND:
			/* if target_freq == policy->cur,
			 * cpufreq_driver_target() would return early and
			 * our target callback would never see the
			 * ENABLE_FURTHER_CPUFREQ flag; avoid that.
			 */
			policy->cur++;
			cpufreq_driver_target(policy, suspend_freq[i],
					      ENABLE_FURTHER_CPUFREQ |
					      CPUFREQ_RELATION_H);
			ret = NOTIFY_OK;
			break;
		}
out:
		cpufreq_cpu_put(policy);
	}
	return ret;
}
452
/* Suspend/resume hook, registered in cpufreq_probe(). */
static struct notifier_block cpufreq_pm_notifier = {
	.notifier_call = cpufreq_pm_notifier_event,
};
456
457 static int rockchip_bl_cpufreq_reboot_limit_freq(void)
458 {
459         struct regulator *regulator;
460         int volt = 0;
461         u32 rate;
462         int i;
463
464         dvfs_disable_temp_limit();
465
466         for (i = 0; i < RK_MAX_CLUSTERS; i++) {
467                 dvfs_clk_enable_limit(clk_cpu_dvfs_node[i],
468                                       1000*suspend_freq[i],
469                                       1000*suspend_freq[i]);
470                         rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[i]);
471         }
472
473         regulator = dvfs_get_regulator("vdd_arm");
474         if (regulator)
475                 volt = regulator_get_voltage(regulator);
476         else
477                 pr_info("cpufreq: get arm regulator failed\n");
478         pr_info("cpufreq: reboot set cluster0 rate=%lu, cluster1 rate=%lu, volt=%d\n",
479                 dvfs_clk_get_rate(clk_cpu_dvfs_node[0]),
480                 dvfs_clk_get_rate(clk_cpu_dvfs_node[1]), volt);
481
482         return 0;
483 }
484
485 static int cpufreq_reboot_notifier_event(struct notifier_block *this,
486                                          unsigned long event, void *ptr)
487 {
488         rockchip_set_system_status(SYS_STATUS_REBOOT);
489         rockchip_bl_cpufreq_reboot_limit_freq();
490
491         return NOTIFY_OK;
492 };
493
/* Reboot hook, registered in cpufreq_probe(). */
static struct notifier_block cpufreq_reboot_notifier = {
	.notifier_call = cpufreq_reboot_notifier_event,
};
497
/* cpufreq driver operations shared by both clusters. */
static struct cpufreq_driver cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cpufreq_bl_verify,
	.target = cpufreq_bl_target,
	.get = cpufreq_bl_get_rate,
	.init = cpufreq_bl_init,
	.exit = cpufreq_bl_exit,
	.name = "rockchip-bl",
	.have_governor_per_policy = true,
	.attr = cpufreq_attr,
};
509
/* Device-tree match table (RK3368 boards). */
static const struct of_device_id cpufreq_match[] = {
	{
		.compatible = "rockchip,rk3368-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cpufreq_match);
517
518 static int cpufreq_probe(struct platform_device *pdev)
519 {
520         struct device_node *np;
521         struct regmap *grf_regmap;
522         unsigned int big_bits, litt_bits;
523         int ret;
524
525         np =  pdev->dev.of_node;
526         if (!np)
527                 return -ENODEV;
528
529         grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
530         if (IS_ERR(grf_regmap)) {
531                 FREQ_ERR("Cpufreq couldn't find grf regmap\n");
532                 return PTR_ERR(grf_regmap);
533         }
534         ret = regmap_read(grf_regmap, RK3368_GRF_CPU_CON(3), &big_bits);
535         if (ret != 0) {
536                 FREQ_ERR("Cpufreq couldn't read to GRF\n");
537                 return -1;
538         }
539         ret = regmap_read(grf_regmap, RK3368_GRF_CPU_CON(1),
540                           &litt_bits);
541         if (ret != 0) {
542                 FREQ_ERR("Cpufreq couldn't read to GRF\n");
543                 return -1;
544         }
545
546         big_bits = (big_bits >> 8) & 0x03;
547         litt_bits = (litt_bits >> 8) & 0x03;
548
549         if (big_bits == 0x01 && litt_bits == 0x00)
550                 big_little = 1;
551         else if (big_bits == 0x0 && litt_bits == 0x01)
552                 big_little = 0;
553         pr_info("cpufreq: boot from %d\n", big_little);
554
555         register_reboot_notifier(&cpufreq_reboot_notifier);
556         register_pm_notifier(&cpufreq_pm_notifier);
557
558         return cpufreq_register_driver(&cpufreq_driver);
559 }
560
561 static int cpufreq_remove(struct platform_device *pdev)
562 {
563         cpufreq_unregister_driver(&cpufreq_driver);
564         return 0;
565 }
566
/* Platform driver glue; bound through the DT compatible above. */
static struct platform_driver cpufreq_platdrv = {
	.driver = {
		.name	= "rockchip-bl-cpufreq",
		.owner	= THIS_MODULE,
		.of_match_table = cpufreq_match,
	},
	.probe		= cpufreq_probe,
	.remove		= cpufreq_remove,
};
module_platform_driver(cpufreq_platdrv);
577
578 MODULE_AUTHOR("Xiao Feng <xf@rock-chips.com>");
579 MODULE_LICENSE("GPL");