cpufreq: rockchip_big_little: fix panic when topology_physical_package_id() returns a negative id
[firefly-linux-kernel-4.4.55.git] / drivers / cpufreq / rockchip_big_little.c
1 /*
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/clk.h>
18 #include <linux/cpufreq.h>
19 #include <linux/err.h>
20 #include <linux/kernel_stat.h>
21 #include <linux/init.h>
22 #include <linux/reboot.h>
23 #include <linux/suspend.h>
24 #include <linux/tick.h>
25 #include <linux/workqueue.h>
26 #include <linux/delay.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/fs.h>
29 #include <linux/miscdevice.h>
30 #include <linux/string.h>
31 #include <linux/rockchip/cpu.h>
32 #include <linux/rockchip/dvfs.h>
33 #include <asm/smp_plat.h>
34 #include <asm/unistd.h>
35 #include <linux/uaccess.h>
36 #include <asm/system_misc.h>
37 #include <linux/cpu.h>
38 #include <linux/of.h>
39 #include <linux/mfd/syscon.h>
40 #include <linux/regmap.h>
41 #include <linux/rockchip/common.h>
42 #include <dt-bindings/clock/rk_system_status.h>
43 #include <linux/platform_device.h>
44 #include <linux/module.h>
45 #include "../../../drivers/clk/rockchip/clk-pd.h"
46
/*
 * GRF register offset of CPU control word <n>.
 * Fix: the macro argument was expanded unparenthesized (0x500 + 4*n), so
 * an expression argument such as (1 + 2) produced the wrong offset.
 */
#define RK3368_GRF_CPU_CON(n) (0x500 + 4 * (n))

#define VERSION "1.0"
/* Number of CPU clusters on a big.LITTLE SoC (big + little). */
#define RK_MAX_CLUSTERS 2

#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
60
/* Frequency table index must be sequential starting at 0 */
/* Fallback OPP table used when the DVFS core provides none.
 * .frequency is in kHz; .index (ab)used to carry the voltage in uV. */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 312 * 1000,       .index = 875 * 1000},
        {.frequency = 504 * 1000,       .index = 925 * 1000},
        {.frequency = 816 * 1000,       .index = 975 * 1000},
        {.frequency = 1008 * 1000,      .index = 1075 * 1000},
        {.frequency = 1200 * 1000,      .index = 1150 * 1000},
        {.frequency = 1416 * 1000,      .index = 1250 * 1000},
        {.frequency = 1608 * 1000,      .index = 1350 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};

/* Per-cluster tables (extra slot presumably guards out-of-range cluster
 * ids with a NULL entry -- TODO confirm). */
static struct cpufreq_frequency_table *freq_table[RK_MAX_CLUSTERS + 1];
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
/* Per-cluster refusal counters; while non-zero, ->target denies changes. */
static unsigned int no_cpufreq_access[RK_MAX_CLUSTERS] = { 0 };
/* Frequency (kHz) each cluster is pinned to across suspend/reboot. */
static unsigned int suspend_freq[RK_MAX_CLUSTERS] = { 816 * 1000, 816 * 1000 };
/* Voltage floor (uV) used to pick suspend_freq from the table. */
static unsigned int suspend_volt = 1100000;
/* Boot-time frequency cap (kHz) applied while the battery is low. */
static unsigned int low_battery_freq[RK_MAX_CLUSTERS] = { 600 * 1000,
        600 * 1000 };
static unsigned int low_battery_capacity = 5;   /* percent */
static bool is_booting = true;  /* cleared 60 s after boot */
static DEFINE_MUTEX(cpufreq_mutex);     /* serializes ->target */
static struct dvfs_node *clk_cpu_dvfs_node[RK_MAX_CLUSTERS];
static struct dvfs_node *clk_gpu_dvfs_node;
static struct dvfs_node *clk_ddr_dvfs_node;
/* Representative CPU (policy->cpu) for each cluster. */
static u32 cluster_policy_cpu[RK_MAX_CLUSTERS];
/* Which cluster we booted from (read from GRF in probe). */
static unsigned int big_little = 1;
94
95 /*******************************************************/
/*
 * Map a CPU number to its cluster index.
 *
 * topology_physical_package_id() may return a negative id when the
 * topology is not populated; clamp that to cluster 0 so callers never
 * index the per-cluster arrays with a negative value.
 */
static inline int cpu_to_cluster(int cpu)
{
        int cluster = topology_physical_package_id(cpu);

        return (cluster < 0) ? 0 : cluster;
}
103
104 static unsigned int rockchip_bl_cpufreq_get_rate(unsigned int cpu)
105 {
106         u32 cur_cluster = cpu_to_cluster(cpu);
107
108         if (clk_cpu_dvfs_node[cur_cluster])
109                 return clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
110
111         return 0;
112 }
113
114 static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
115 {
116         char c = 0;
117
118         if (policy && policy->governor)
119                 c = policy->governor->name[0];
120         return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
121 }
122
123 static unsigned int get_freq_from_table(unsigned int max_freq,
124                                         unsigned int cluster)
125 {
126         unsigned int i;
127         unsigned int target_freq = 0;
128
129         for (i = 0; freq_table[cluster][i].frequency != CPUFREQ_TABLE_END;
130              i++) {
131                 unsigned int freq = freq_table[cluster][i].frequency;
132
133                 if (freq <= max_freq && target_freq < freq)
134                         target_freq = freq;
135         }
136         if (!target_freq)
137                 target_freq = max_freq;
138         return target_freq;
139 }
140
/*
 * CPUFREQ_ADJUST policy notifier.
 *
 * When switching to a self-scaling ("ondemand"-like) governor, re-apply
 * the DVFS min/max limits previously saved; for any other governor,
 * snapshot the current limits so they can be restored later.
 * NOTE(review): min_rate/max_rate are function-static, so the very first
 * "enable" path applies the initial 0/-1 (unbounded) limits -- confirm
 * that is intended.
 */
static int rockchip_bl_cpufreq_notifier_policy(struct notifier_block *nb,
                                               unsigned long val,
                                               void *data)
{
        static unsigned int min_rate = 0, max_rate = -1;
        struct cpufreq_policy *policy = data;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (val != CPUFREQ_ADJUST)
                return 0;

        if (cpufreq_is_ondemand(policy)) {
                FREQ_DBG("queue work\n");
                dvfs_clk_enable_limit(clk_cpu_dvfs_node[cur_cluster],
                                      min_rate, max_rate);
        } else {
                FREQ_DBG("cancel work\n");
                dvfs_clk_get_limit(clk_cpu_dvfs_node[cur_cluster],
                                   &min_rate, &max_rate);
        }

        return 0;
}
164
/* Registered for CPUFREQ_POLICY_NOTIFIER events in init_cpu0(). */
static struct notifier_block notifier_policy_block = {
        .notifier_call = rockchip_bl_cpufreq_notifier_policy
};
168
169 static int rockchip_bl_cpufreq_verify(struct cpufreq_policy *policy)
170 {
171         u32 cur_cluster = cpu_to_cluster(policy->cpu);
172
173         if (!freq_table[cur_cluster])
174                 return -EINVAL;
175         return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
176 }
177
178 static int clk_node_get_cluster_id(struct clk *clk)
179 {
180         int i;
181
182         for (i = 0; i < RK_MAX_CLUSTERS; i++) {
183                 if (clk_cpu_dvfs_node[i]->clk == clk)
184                         return i;
185         }
186         return 0;
187 }
188
189 static int rockchip_bl_cpufreq_scale_rate_for_dvfs(struct clk *clk,
190                                                    unsigned long rate)
191 {
192         int ret;
193         struct cpufreq_freqs freqs;
194         struct cpufreq_policy *policy;
195         u32 cur_cluster;
196
197         cur_cluster = clk_node_get_cluster_id(clk);
198         policy = cpufreq_cpu_get(cluster_policy_cpu[cur_cluster]);
199
200         freqs.new = rate / 1000;
201         freqs.old = clk_get_rate(clk) / 1000;
202
203         cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
204
205         FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
206
207         ret = clk_set_rate(clk, rate);
208
209         freqs.new = clk_get_rate(clk) / 1000;
210         /* notifiers */
211         cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
212
213         cpufreq_cpu_put(policy);
214         return ret;
215 }
216
/*
 * Per-cluster DVFS setup: look up the cluster's dvfs node, hook our
 * set-rate callback into it, pick a frequency table, and derive the
 * suspend and low-battery frequencies from that table.
 *
 * Returns 0 on success, -EINVAL if the dvfs node cannot be found.
 */
static int cluster_cpus_freq_dvfs_init(u32 cluster_id, char *dvfs_name)
{
        clk_cpu_dvfs_node[cluster_id] = clk_get_dvfs_node(dvfs_name);

        if (!clk_cpu_dvfs_node[cluster_id]) {
                FREQ_ERR("%s:cluster_id=%d,get dvfs err\n",
                         __func__, cluster_id);
                return -EINVAL;
        }
        /* route dvfs-initiated rate changes through cpufreq notifiers */
        dvfs_clk_register_set_rate_callback(
                clk_cpu_dvfs_node[cluster_id],
                rockchip_bl_cpufreq_scale_rate_for_dvfs);
        freq_table[cluster_id] =
                dvfs_get_freq_volt_table(clk_cpu_dvfs_node[cluster_id]);
        if (freq_table[cluster_id] == NULL) {
                freq_table[cluster_id] = default_freq_table;
        } else {
                int v = INT_MAX;
                int i;

                /*
                 * Suspend frequency: the entry with the lowest voltage
                 * (.index carries uV here) that is still at or above
                 * suspend_volt.
                 */
                for (i = 0; freq_table[cluster_id][i].frequency !=
                     CPUFREQ_TABLE_END; i++) {
                        if (freq_table[cluster_id][i].index >= suspend_volt &&
                            v > freq_table[cluster_id][i].index) {
                                suspend_freq[cluster_id] =
                                        freq_table[cluster_id][i].frequency;
                                v = freq_table[cluster_id][i].index;
                        }
                }
        }
        /* snap the low-battery cap onto an actual table frequency */
        low_battery_freq[cluster_id] =
                get_freq_from_table(low_battery_freq[cluster_id], cluster_id);
        clk_enable_dvfs(clk_cpu_dvfs_node[cluster_id]);
        return 0;
}
252
253 static int rockchip_bl_cpufreq_init_cpu0(struct cpufreq_policy *policy)
254 {
255         clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
256         if (clk_gpu_dvfs_node)
257                 clk_enable_dvfs(clk_gpu_dvfs_node);
258
259         clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
260         if (clk_ddr_dvfs_node)
261                 clk_enable_dvfs(clk_ddr_dvfs_node);
262
263         cluster_cpus_freq_dvfs_init(0, "clk_core_b");
264         cluster_cpus_freq_dvfs_init(1, "clk_core_l");
265
266         cpufreq_register_notifier(&notifier_policy_block,
267                                   CPUFREQ_POLICY_NOTIFIER);
268
269         pr_info("version " VERSION ", suspend freq %d %d MHz\n",
270                 suspend_freq[0] / 1000, suspend_freq[1] / 1000);
271         return 0;
272 }
273
274 static int rockchip_bl_cpufreq_init(struct cpufreq_policy *policy)
275 {
276         static int cpu0_err;
277         u32 cur_cluster = cpu_to_cluster(policy->cpu);
278
279         if (policy->cpu == 0)
280                 cpu0_err = rockchip_bl_cpufreq_init_cpu0(policy);
281         if (cpu0_err)
282                 return cpu0_err;
283
284         cluster_policy_cpu[cur_cluster] = policy->cpu;
285
286         /* set freq min max */
287         cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
288         /* sys nod */
289         cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
290
291         if (cur_cluster < RK_MAX_CLUSTERS)
292                 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
293
294         policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
295
296         /* make ondemand default sampling_rate to 40000 */
297         policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;
298
299         return 0;
300 }
301
/*
 * cpufreq ->exit hook: tear down what ->init set up for this policy.
 * NOTE(review): calling cpufreq_frequency_table_cpuinfo() here looks
 * odd -- the usual counterpart is cpufreq_frequency_table_put_attr();
 * kept as-is, confirm against the cpufreq core this kernel ships.
 */
static int rockchip_bl_cpufreq_exit(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (policy->cpu == 0) {
                cpufreq_unregister_notifier(&notifier_policy_block,
                                            CPUFREQ_POLICY_NOTIFIER);
        }
        cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        clk_put_dvfs_node(clk_cpu_dvfs_node[cur_cluster]);

        return 0;
}
315
/* sysfs attributes exported per policy (scaling_available_frequencies). */
static struct freq_attr *rockchip_bl_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};
320
/* Battery capacity in percent; stubbed to "full" (100) when the charger
 * display driver is not built in, which disables the low-battery cap. */
#ifdef CONFIG_CHARGER_DISPLAY
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void)
{
        return 100;
}
#endif
329
330 static unsigned int
331 rockchip_bl_cpufreq_scale_limit(unsigned int target_freq,
332                                 struct cpufreq_policy *policy, bool is_private)
333 {
334         bool is_ondemand = cpufreq_is_ondemand(policy);
335         u32 cur_cluster = cpu_to_cluster(policy->cpu);
336
337         if (!is_ondemand)
338                 return target_freq;
339
340         if (is_booting) {
341                 s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
342
343                 if (boottime_ms > 60 * MSEC_PER_SEC) {
344                         is_booting = false;
345                 } else if (target_freq > low_battery_freq[cur_cluster] &&
346                            rk_get_system_battery_capacity() <=
347                            low_battery_capacity) {
348                         target_freq = low_battery_freq[cur_cluster];
349                 }
350         }
351
352         return target_freq;
353 }
354
/*
 * cpufreq ->target hook.
 *
 * "relation" smuggles extra flags on top of CPUFREQ_RELATION_*:
 * DISABLE_FURTHER_CPUFREQ / ENABLE_FURTHER_CPUFREQ maintain the
 * per-cluster no_cpufreq_access counter (used around suspend), and
 * CPUFREQ_PRIVATE marks driver-internal requests.
 */
static int rockchip_bl_cpufreq_target(struct cpufreq_policy *policy,
                                      unsigned int target_freq,
                                      unsigned int relation)
{
        unsigned int i, new_freq = target_freq, new_rate, cur_rate;
        int ret = 0;
        bool is_private;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!freq_table[cur_cluster]) {
                FREQ_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

        /* ENABLE releases one level of the "further cpufreq" lock-out */
        if ((relation & ENABLE_FURTHER_CPUFREQ) &&
            no_cpufreq_access[cur_cluster])
                no_cpufreq_access[cur_cluster]--;
        if (no_cpufreq_access[cur_cluster]) {
                FREQ_LOG("denied access to %s as it is disabled temporarily\n",
                         __func__);
                ret = -EINVAL;
                goto out;
        }
        /* DISABLE takes one level (this request itself still proceeds) */
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access[cur_cluster]++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
                                             target_freq, relation, &i);
        if (ret) {
                FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_freq = freq_table[cur_cluster][i].frequency;
        /* apply the boot-time low-battery cap unless we are locked out */
        if (!no_cpufreq_access[cur_cluster])
                new_freq =
                    rockchip_bl_cpufreq_scale_limit(new_freq, policy,
                                                    is_private);

        new_rate = new_freq * 1000;
        cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[cur_cluster]);
        FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq,
                 new_freq, cur_rate / 1000);
        if (new_rate == cur_rate)
                goto out;
        /* actual change; cpufreq notifications fire in the dvfs callback */
        ret = dvfs_clk_set_rate(clk_cpu_dvfs_node[cur_cluster], new_rate);

out:
        FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
        mutex_unlock(&cpufreq_mutex);
        return ret;
}
412
/*
 * PM notifier: before suspend, pin each cluster to its suspend
 * frequency and lock out further changes (DISABLE_FURTHER_CPUFREQ);
 * after resume, release the lock-out again.
 * NOTE(review): a NULL policy for one cluster returns early and skips
 * the remaining clusters -- confirm that is intended.
 */
static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        int i;

        for (i = 0; i < RK_MAX_CLUSTERS; i++) {
                struct cpufreq_policy *policy =
                        cpufreq_cpu_get(cluster_policy_cpu[i]);

                if (!policy)
                        return ret;

                if (!cpufreq_is_ondemand(policy))
                        goto out;

                switch (event) {
                case PM_SUSPEND_PREPARE:
                        /*
                         * Nudge policy->cur so cpufreq_driver_target()
                         * cannot early-return on "already at that freq"
                         * and skip our flag handling (same trick below).
                         */
                        policy->cur++;
                        ret = cpufreq_driver_target(policy, suspend_freq[i],
                                                    DISABLE_FURTHER_CPUFREQ |
                                                    CPUFREQ_RELATION_H);
                        if (ret < 0) {
                                ret = NOTIFY_BAD;
                                goto out;
                        }
                        ret = NOTIFY_OK;
                        break;
                case PM_POST_RESTORE:
                case PM_POST_SUSPEND:
                        /* if (target_freq == policy->cur) then
                           cpufreq_driver_target would return early and
                           our ->target would never see the
                           ENABLE_FURTHER_CPUFREQ flag; avoid that. */
                        policy->cur++;
                        cpufreq_driver_target(policy, suspend_freq[i],
                                              ENABLE_FURTHER_CPUFREQ |
                                              CPUFREQ_RELATION_H);
                        ret = NOTIFY_OK;
                        break;
                }
out:
                cpufreq_cpu_put(policy);
        }
        return ret;
}
460
/* Registered with register_pm_notifier() in probe(). */
static struct notifier_block rockchip_bl_cpufreq_pm_notifier = {
        .notifier_call = rockchip_bl_cpufreq_pm_notifier_event,
};
464
/*
 * On reboot, clamp both clusters to their suspend frequency (min ==
 * max) and log the resulting rates and ARM voltage.
 */
static int rockchip_bl_cpufreq_reboot_limit_freq(void)
{
        struct regulator *regulator;
        int volt = 0;
        u32 rate;
        int i;

        /* stop thermal limiting from fighting the fixed reboot freq */
        dvfs_disable_temp_limit();

        for (i = 0; i < RK_MAX_CLUSTERS; i++) {
                dvfs_clk_enable_limit(clk_cpu_dvfs_node[i],
                                      1000 * suspend_freq[i],
                                      1000 * suspend_freq[i]);
                /* NOTE(review): 'rate' is read back but never used */
                rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[i]);
        }

        regulator = dvfs_get_regulator("vdd_arm");
        if (regulator)
                volt = regulator_get_voltage(regulator);
        else
                pr_info("get arm regulator failed\n");
        pr_info("reboot set cluster0 rate=%lu, cluster1 rate=%lu, volt=%d\n",
                dvfs_clk_get_rate(clk_cpu_dvfs_node[0]),
                dvfs_clk_get_rate(clk_cpu_dvfs_node[1]), volt);

        return 0;
}
492
493 static int rockchip_bl_cpufreq_reboot_notifier_event(struct notifier_block
494                                                      *this, unsigned long event,
495                                                      void *ptr)
496 {
497         rockchip_set_system_status(SYS_STATUS_REBOOT);
498         rockchip_bl_cpufreq_reboot_limit_freq();
499
500         return NOTIFY_OK;
501 };
502
/* Registered with register_reboot_notifier() in probe(). */
static struct notifier_block rockchip_bl_cpufreq_reboot_notifier = {
        .notifier_call = rockchip_bl_cpufreq_reboot_notifier_event,
};
506
/* Legacy-style cpufreq driver ops (->target with relation flags). */
static struct cpufreq_driver rockchip_bl_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = rockchip_bl_cpufreq_verify,
        .target = rockchip_bl_cpufreq_target,
        .get = rockchip_bl_cpufreq_get_rate,
        .init = rockchip_bl_cpufreq_init,
        .exit = rockchip_bl_cpufreq_exit,
        .name = "rockchip-bl",
        .have_governor_per_policy = true,
        .attr = rockchip_bl_cpufreq_attr,
};
518
/* Device-tree match table; bound by the platform core on RK3368. */
static const struct of_device_id rockchip_bl_cpufreq_match[] = {
        {
                .compatible = "rockchip,rk3368-cpufreq",
        },
        {},
};
MODULE_DEVICE_TABLE(of, rockchip_bl_cpufreq_match);
526
527 static int rockchip_bl_cpufreq_probe(struct platform_device *pdev)
528 {
529         struct device_node *np;
530         struct regmap *grf_regmap;
531         unsigned int big_bits, litt_bits;
532         int ret;
533
534         np = pdev->dev.of_node;
535         if (!np)
536                 return -ENODEV;
537
538         grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
539         if (IS_ERR(grf_regmap)) {
540                 FREQ_ERR("Cpufreq couldn't find grf regmap\n");
541                 return PTR_ERR(grf_regmap);
542         }
543         ret = regmap_read(grf_regmap, RK3368_GRF_CPU_CON(3), &big_bits);
544         if (ret != 0) {
545                 FREQ_ERR("Cpufreq couldn't read to GRF\n");
546                 return -1;
547         }
548         ret = regmap_read(grf_regmap, RK3368_GRF_CPU_CON(1), &litt_bits);
549         if (ret != 0) {
550                 FREQ_ERR("Cpufreq couldn't read to GRF\n");
551                 return -1;
552         }
553
554         big_bits = (big_bits >> 8) & 0x03;
555         litt_bits = (litt_bits >> 8) & 0x03;
556
557         if (big_bits == 0x01 && litt_bits == 0x00)
558                 big_little = 1;
559         else if (big_bits == 0x0 && litt_bits == 0x01)
560                 big_little = 0;
561         pr_info("boot from %d\n", big_little);
562
563         register_reboot_notifier(&rockchip_bl_cpufreq_reboot_notifier);
564         register_pm_notifier(&rockchip_bl_cpufreq_pm_notifier);
565
566         return cpufreq_register_driver(&rockchip_bl_cpufreq_driver);
567 }
568
569 static int rockchip_bl_cpufreq_remove(struct platform_device *pdev)
570 {
571         cpufreq_unregister_driver(&rockchip_bl_cpufreq_driver);
572         return 0;
573 }
574
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver rockchip_bl_cpufreq_platdrv = {
        .driver = {
                .name   = "rockchip-bl-cpufreq",
                .owner  = THIS_MODULE,
                .of_match_table = rockchip_bl_cpufreq_match,
        },
        .probe          = rockchip_bl_cpufreq_probe,
        .remove         = rockchip_bl_cpufreq_remove,
};

module_platform_driver(rockchip_bl_cpufreq_platdrv);

MODULE_AUTHOR("Xiao Feng <xf@rock-chips.com>");
MODULE_LICENSE("GPL");