cpufreq: rockchip_big_little: check cpufreq_cpu_get return value
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/rockchip_big_little.c
/*
 * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/rockchip/cpu.h>
#include <linux/rockchip/dvfs.h>
#include <asm/smp_plat.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/system_misc.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/rockchip/common.h>
#include <dt-bindings/clock/rk_system_status.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "../../../drivers/clk/rockchip/clk-pd.h"

#define RK3368_GRF_CPU_CON(n) (0x500 + 4 * (n))

#define VERSION "1.0"
#define RK_MAX_CLUSTERS 2

#ifdef DEBUG
#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
#else
#define FREQ_DBG(fmt, args...) do {} while (0)
#define FREQ_LOG(fmt, args...) do {} while (0)
#endif
#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)

/* Frequency table index must be sequential starting at 0 */
static struct cpufreq_frequency_table default_freq_table[] = {
        {.frequency = 312 * 1000,       .index = 875 * 1000},
        {.frequency = 504 * 1000,       .index = 925 * 1000},
        {.frequency = 816 * 1000,       .index = 975 * 1000},
        {.frequency = 1008 * 1000,      .index = 1075 * 1000},
        {.frequency = 1200 * 1000,      .index = 1150 * 1000},
        {.frequency = 1416 * 1000,      .index = 1250 * 1000},
        {.frequency = 1608 * 1000,      .index = 1350 * 1000},
        {.frequency = CPUFREQ_TABLE_END},
};

static struct cpufreq_frequency_table *freq_table[RK_MAX_CLUSTERS + 1];
/*********************************************************/
/* additional semantics for "relation" in cpufreq with pm */
#define DISABLE_FURTHER_CPUFREQ         0x10
#define ENABLE_FURTHER_CPUFREQ          0x20
#define MASK_FURTHER_CPUFREQ            0x30
/* With 0x00(NOCHANGE), it depends on the previous "further" status */
#define CPUFREQ_PRIVATE                 0x100
static unsigned int no_cpufreq_access[RK_MAX_CLUSTERS] = { 0 };
static unsigned int suspend_freq[RK_MAX_CLUSTERS] = { 816 * 1000, 816 * 1000 };
static unsigned int suspend_volt = 1100000;
static unsigned int low_battery_freq[RK_MAX_CLUSTERS] = { 600 * 1000,
        600 * 1000 };
static unsigned int low_battery_capacity = 5;
static bool is_booting = true;
static DEFINE_MUTEX(cpufreq_mutex);
static struct dvfs_node *clk_cpu_dvfs_node[RK_MAX_CLUSTERS];
static struct dvfs_node *clk_gpu_dvfs_node;
static struct dvfs_node *clk_ddr_dvfs_node;
static u32 cluster_policy_cpu[RK_MAX_CLUSTERS];
static unsigned int big_little = 1;

/*******************************************************/
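/* Map a CPU to its cluster index via the physical package id (0 if unknown). */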
static inline int cpu_to_cluster(int cpu)
{
        int id = topology_physical_package_id(cpu);
        if (id < 0)
                id = 0;
        return id;
}

static unsigned int rockchip_bl_cpufreq_get_rate(unsigned int cpu)
{
        u32 cur_cluster = cpu_to_cluster(cpu);

        if (clk_cpu_dvfs_node[cur_cluster])
                return clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;

        return 0;
}

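/*
 * Treat the ondemand, interactive, conservative and hotplug governors
 * (matched by the first letter of their name) as "ondemand-like".
 */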
static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
{
        char c = 0;

        if (policy && policy->governor)
                c = policy->governor->name[0];
        return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
}

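/* Return the highest table frequency that does not exceed max_freq. */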
static unsigned int get_freq_from_table(unsigned int max_freq,
                                        unsigned int cluster)
{
        unsigned int i;
        unsigned int target_freq = 0;

        for (i = 0; freq_table[cluster][i].frequency != CPUFREQ_TABLE_END;
             i++) {
                unsigned int freq = freq_table[cluster][i].frequency;

                if (freq <= max_freq && target_freq < freq)
                        target_freq = freq;
        }
        if (!target_freq)
                target_freq = max_freq;
        return target_freq;
}

static int rockchip_bl_cpufreq_notifier_policy(struct notifier_block *nb,
                                               unsigned long val,
                                               void *data)
{
        static unsigned int min_rate = 0, max_rate = -1;
        struct cpufreq_policy *policy = data;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (val != CPUFREQ_ADJUST)
                return 0;

        if (cpufreq_is_ondemand(policy)) {
                FREQ_DBG("queue work\n");
                dvfs_clk_enable_limit(clk_cpu_dvfs_node[cur_cluster],
                                      min_rate, max_rate);
        } else {
                FREQ_DBG("cancel work\n");
                dvfs_clk_get_limit(clk_cpu_dvfs_node[cur_cluster],
                                   &min_rate, &max_rate);
        }

        return 0;
}

static struct notifier_block notifier_policy_block = {
        .notifier_call = rockchip_bl_cpufreq_notifier_policy
};

static int rockchip_bl_cpufreq_verify(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!freq_table[cur_cluster])
                return -EINVAL;
        return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
}

static int clk_node_get_cluster_id(struct clk *clk)
{
        int i;

        for (i = 0; i < RK_MAX_CLUSTERS; i++) {
                if (clk_cpu_dvfs_node[i]->clk == clk)
                        return i;
        }
        return 0;
}

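/*
 * dvfs set_rate callback: wrap the actual clk_set_rate() with cpufreq
 * PRECHANGE/POSTCHANGE transition notifications for the cluster's policy.
 */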
static int rockchip_bl_cpufreq_scale_rate_for_dvfs(struct clk *clk,
                                                   unsigned long rate)
{
        int ret;
        struct cpufreq_freqs freqs;
        struct cpufreq_policy *policy;
        u32 cur_cluster;

        cur_cluster = clk_node_get_cluster_id(clk);
        policy = cpufreq_cpu_get(cluster_policy_cpu[cur_cluster]);
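        /* No policy registered for this cluster yet; nothing to notify. */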
        if (!policy)
                return 0;

        freqs.new = rate / 1000;
        freqs.old = clk_get_rate(clk) / 1000;

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

        FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);

        ret = clk_set_rate(clk, rate);

        freqs.new = clk_get_rate(clk) / 1000;
        /* notifiers */
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        cpufreq_cpu_put(policy);
        return ret;
}

static int cluster_cpus_freq_dvfs_init(u32 cluster_id, char *dvfs_name)
{
        clk_cpu_dvfs_node[cluster_id] = clk_get_dvfs_node(dvfs_name);

        if (!clk_cpu_dvfs_node[cluster_id]) {
                FREQ_ERR("%s:cluster_id=%d,get dvfs err\n",
                         __func__, cluster_id);
                return -EINVAL;
        }
        dvfs_clk_register_set_rate_callback(
                clk_cpu_dvfs_node[cluster_id],
                rockchip_bl_cpufreq_scale_rate_for_dvfs);
        freq_table[cluster_id] =
                dvfs_get_freq_volt_table(clk_cpu_dvfs_node[cluster_id]);
        if (freq_table[cluster_id] == NULL) {
                freq_table[cluster_id] = default_freq_table;
        } else {
                int v = INT_MAX;
                int i;

                for (i = 0; freq_table[cluster_id][i].frequency !=
                     CPUFREQ_TABLE_END; i++) {
                        if (freq_table[cluster_id][i].index >= suspend_volt &&
                            v > freq_table[cluster_id][i].index) {
                                suspend_freq[cluster_id] =
                                        freq_table[cluster_id][i].frequency;
                                v = freq_table[cluster_id][i].index;
                        }
                }
        }
        low_battery_freq[cluster_id] =
                get_freq_from_table(low_battery_freq[cluster_id], cluster_id);
        clk_enable_dvfs(clk_cpu_dvfs_node[cluster_id]);
        return 0;
}

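/*
 * One-time setup, done from the first policy (cpu0): enable GPU/DDR dvfs,
 * initialize both CPU clusters and register the policy notifier.
 */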
static int rockchip_bl_cpufreq_init_cpu0(struct cpufreq_policy *policy)
{
        clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
        if (clk_gpu_dvfs_node)
                clk_enable_dvfs(clk_gpu_dvfs_node);

        clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
        if (clk_ddr_dvfs_node)
                clk_enable_dvfs(clk_ddr_dvfs_node);

        cluster_cpus_freq_dvfs_init(0, "clk_core_b");
        cluster_cpus_freq_dvfs_init(1, "clk_core_l");

        cpufreq_register_notifier(&notifier_policy_block,
                                  CPUFREQ_POLICY_NOTIFIER);

        pr_info("version " VERSION ", suspend freq %d %d MHz\n",
                suspend_freq[0] / 1000, suspend_freq[1] / 1000);
        return 0;
}

static int rockchip_bl_cpufreq_init(struct cpufreq_policy *policy)
{
        static int cpu0_err;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (policy->cpu == 0)
                cpu0_err = rockchip_bl_cpufreq_init_cpu0(policy);
        if (cpu0_err)
                return cpu0_err;

        cluster_policy_cpu[cur_cluster] = policy->cpu;

        /* set cpuinfo min/max freq from the frequency table */
        cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        /* expose the frequency table via sysfs */
        cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);

        if (cur_cluster < RK_MAX_CLUSTERS)
                cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

        policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;

        /* make the ondemand governor's default sampling_rate 40000 */
        policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;

        return 0;
}

static int rockchip_bl_cpufreq_exit(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (policy->cpu == 0) {
                cpufreq_unregister_notifier(&notifier_policy_block,
                                            CPUFREQ_POLICY_NOTIFIER);
        }
        cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        clk_put_dvfs_node(clk_cpu_dvfs_node[cur_cluster]);

        return 0;
}

static struct freq_attr *rockchip_bl_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

#ifdef CONFIG_CHARGER_DISPLAY
extern int rk_get_system_battery_capacity(void);
#else
static int rk_get_system_battery_capacity(void)
{
        return 100;
}
#endif

static unsigned int
rockchip_bl_cpufreq_scale_limit(unsigned int target_freq,
                                struct cpufreq_policy *policy, bool is_private)
{
        bool is_ondemand = cpufreq_is_ondemand(policy);
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!is_ondemand)
                return target_freq;

        if (is_booting) {
                s64 boottime_ms = ktime_to_ms(ktime_get_boottime());

                if (boottime_ms > 60 * MSEC_PER_SEC) {
                        is_booting = false;
                } else if (target_freq > low_battery_freq[cur_cluster] &&
                           rk_get_system_battery_capacity() <=
                           low_battery_capacity) {
                        target_freq = low_battery_freq[cur_cluster];
                }
        }

        return target_freq;
}

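/*
 * relation may carry the DISABLE/ENABLE_FURTHER_CPUFREQ flags, which
 * increment/decrement a per-cluster refcount; while it is non-zero all
 * regular frequency requests for that cluster are rejected.
 */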
static int rockchip_bl_cpufreq_target(struct cpufreq_policy *policy,
                                      unsigned int target_freq,
                                      unsigned int relation)
{
        unsigned int i, new_freq = target_freq, new_rate, cur_rate;
        int ret = 0;
        bool is_private;
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        if (!freq_table[cur_cluster]) {
                FREQ_ERR("no freq table!\n");
                return -EINVAL;
        }

        mutex_lock(&cpufreq_mutex);

        is_private = relation & CPUFREQ_PRIVATE;
        relation &= ~CPUFREQ_PRIVATE;

        if ((relation & ENABLE_FURTHER_CPUFREQ) &&
            no_cpufreq_access[cur_cluster])
                no_cpufreq_access[cur_cluster]--;
        if (no_cpufreq_access[cur_cluster]) {
                FREQ_LOG("denied access to %s as it is disabled temporarily\n",
                         __func__);
                ret = -EINVAL;
                goto out;
        }
        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access[cur_cluster]++;
        relation &= ~MASK_FURTHER_CPUFREQ;

        ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
                                             target_freq, relation, &i);
        if (ret) {
                FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
                goto out;
        }
        new_freq = freq_table[cur_cluster][i].frequency;
        if (!no_cpufreq_access[cur_cluster])
                new_freq =
                    rockchip_bl_cpufreq_scale_limit(new_freq, policy,
                                                    is_private);

        new_rate = new_freq * 1000;
        cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[cur_cluster]);
        FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq,
                 new_freq, cur_rate / 1000);
        if (new_rate == cur_rate)
                goto out;
        ret = dvfs_clk_set_rate(clk_cpu_dvfs_node[cur_cluster], new_rate);

out:
        FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
        mutex_unlock(&cpufreq_mutex);
        return ret;
}

static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;
        int i;

        for (i = 0; i < RK_MAX_CLUSTERS; i++) {
                struct cpufreq_policy *policy =
                        cpufreq_cpu_get(cluster_policy_cpu[i]);

                if (!policy)
                        return ret;

                if (!cpufreq_is_ondemand(policy))
                        goto out;

                switch (event) {
                case PM_SUSPEND_PREPARE:
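                        /*
                         * Bump policy->cur so cpufreq_driver_target() does not
                         * return early when we are already at suspend_freq and
                         * the DISABLE_FURTHER_CPUFREQ flag gets processed.
                         */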
                        policy->cur++;
                        ret = cpufreq_driver_target(policy, suspend_freq[i],
                                                    DISABLE_FURTHER_CPUFREQ |
                                                    CPUFREQ_RELATION_H);
                        if (ret < 0) {
                                ret = NOTIFY_BAD;
                                goto out;
                        }
                        ret = NOTIFY_OK;
                        break;
                case PM_POST_RESTORE:
                case PM_POST_SUSPEND:
                        /*
                         * If target_freq == policy->cur, cpufreq_driver_target()
                         * returns early and our target() is never called, so the
                         * ENABLE_FURTHER_CPUFREQ flag would never be processed.
                         * Bump policy->cur to avoid that.
                         */
                        policy->cur++;
                        cpufreq_driver_target(policy, suspend_freq[i],
                                              ENABLE_FURTHER_CPUFREQ |
                                              CPUFREQ_RELATION_H);
                        ret = NOTIFY_OK;
                        break;
                }
out:
                cpufreq_cpu_put(policy);
        }
        return ret;
}

static struct notifier_block rockchip_bl_cpufreq_pm_notifier = {
        .notifier_call = rockchip_bl_cpufreq_pm_notifier_event,
};

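/*
 * Pin both clusters to their suspend frequency (and thus suspend voltage)
 * before reboot.
 */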
static int rockchip_bl_cpufreq_reboot_limit_freq(void)
{
        struct regulator *regulator;
        int volt = 0;
        u32 rate;
        int i;

        dvfs_disable_temp_limit();

        for (i = 0; i < RK_MAX_CLUSTERS; i++) {
                dvfs_clk_enable_limit(clk_cpu_dvfs_node[i],
                                      1000 * suspend_freq[i],
                                      1000 * suspend_freq[i]);
                rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[i]);
        }

        regulator = dvfs_get_regulator("vdd_arm");
        if (regulator)
                volt = regulator_get_voltage(regulator);
        else
                pr_info("get arm regulator failed\n");
        pr_info("reboot set cluster0 rate=%lu, cluster1 rate=%lu, volt=%d\n",
                dvfs_clk_get_rate(clk_cpu_dvfs_node[0]),
                dvfs_clk_get_rate(clk_cpu_dvfs_node[1]), volt);

        return 0;
}

static int rockchip_bl_cpufreq_reboot_notifier_event(struct notifier_block
                                                     *this, unsigned long event,
                                                     void *ptr)
{
        rockchip_set_system_status(SYS_STATUS_REBOOT);
        rockchip_bl_cpufreq_reboot_limit_freq();

        return NOTIFY_OK;
}

static struct notifier_block rockchip_bl_cpufreq_reboot_notifier = {
        .notifier_call = rockchip_bl_cpufreq_reboot_notifier_event,
};

static struct cpufreq_driver rockchip_bl_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = rockchip_bl_cpufreq_verify,
        .target = rockchip_bl_cpufreq_target,
        .get = rockchip_bl_cpufreq_get_rate,
        .init = rockchip_bl_cpufreq_init,
        .exit = rockchip_bl_cpufreq_exit,
        .name = "rockchip-bl",
        .have_governor_per_policy = true,
        .attr = rockchip_bl_cpufreq_attr,
};

static const struct of_device_id rockchip_bl_cpufreq_match[] = {
        {
                .compatible = "rockchip,rk3368-cpufreq",
        },
        {},
};
MODULE_DEVICE_TABLE(of, rockchip_bl_cpufreq_match);

static int rockchip_bl_cpufreq_probe(struct platform_device *pdev)
{
        struct device_node *np;
        struct regmap *grf_regmap;
        unsigned int big_bits, litt_bits;
        int ret;

        np = pdev->dev.of_node;
        if (!np)
                return -ENODEV;

        grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
        if (IS_ERR(grf_regmap)) {
                FREQ_ERR("Cpufreq couldn't find grf regmap\n");
                return PTR_ERR(grf_regmap);
        }
        ret = regmap_read(grf_regmap, RK3368_GRF_CPU_CON(3), &big_bits);
        if (ret != 0) {
                FREQ_ERR("Cpufreq couldn't read GRF\n");
                return ret;
        }
        ret = regmap_read(grf_regmap, RK3368_GRF_CPU_CON(1), &litt_bits);
        if (ret != 0) {
                FREQ_ERR("Cpufreq couldn't read GRF\n");
                return ret;
        }

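        /* Decode the GRF CPU_CON status bits to see which cluster we booted from. */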
        big_bits = (big_bits >> 8) & 0x03;
        litt_bits = (litt_bits >> 8) & 0x03;

        if (big_bits == 0x01 && litt_bits == 0x00)
                big_little = 1;
        else if (big_bits == 0x0 && litt_bits == 0x01)
                big_little = 0;
        pr_info("boot from %d\n", big_little);

        register_reboot_notifier(&rockchip_bl_cpufreq_reboot_notifier);
        register_pm_notifier(&rockchip_bl_cpufreq_pm_notifier);

        return cpufreq_register_driver(&rockchip_bl_cpufreq_driver);
}

static int rockchip_bl_cpufreq_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&rockchip_bl_cpufreq_driver);
        return 0;
}

static struct platform_driver rockchip_bl_cpufreq_platdrv = {
        .driver = {
                .name   = "rockchip-bl-cpufreq",
                .owner  = THIS_MODULE,
                .of_match_table = rockchip_bl_cpufreq_match,
        },
        .probe          = rockchip_bl_cpufreq_probe,
        .remove         = rockchip_bl_cpufreq_remove,
};

module_platform_driver(rockchip_bl_cpufreq_platdrv);

MODULE_AUTHOR("Xiao Feng <xf@rock-chips.com>");
MODULE_LICENSE("GPL");