rk3368: cpufreq: fix pm_notifier to return NOTIFY_BAD on failure
[firefly-linux-kernel-4.4.55.git] drivers/cpufreq/rockchip_big_little.c
1 /*
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/clk.h>
18 #include <linux/cpufreq.h>
19 #include <linux/err.h>
20 #include <linux/kernel_stat.h>
21 #include <linux/init.h>
22 #include <linux/reboot.h>
23 #include <linux/suspend.h>
24 #include <linux/tick.h>
25 #include <linux/workqueue.h>
26 #include <linux/delay.h>
27 #include <linux/regulator/consumer.h>
28 #include <linux/fs.h>
29 #include <linux/miscdevice.h>
30 #include <linux/string.h>
31 #ifdef CONFIG_ROCKCHIP_CPUQUIET
32 #include <linux/cpuquiet.h>
33 #include <linux/pm_qos.h>
34 #endif
35 #include <linux/rockchip/cpu.h>
36 #include <linux/rockchip/dvfs.h>
37 #include <asm/smp_plat.h>
38 #include <asm/unistd.h>
39 #include <linux/uaccess.h>
40 #include <asm/system_misc.h>
41 #include <linux/cpu.h>
42 #include <linux/of.h>
43 #include <linux/mfd/syscon.h>
44 #include <linux/regmap.h>
45 #include <linux/rockchip/common.h>
46 #include <dt-bindings/clock/rk_system_status.h>
47 #include <linux/platform_device.h>
48 #include <linux/module.h>
49 #include "../../../drivers/clk/rockchip/clk-pd.h"
50
51 #define VERSION "1.0"
52 #define MAX_CLUSTERS 2
53 #define B_CLUSTER       0
54 #define L_CLUSTER       1
55
56 #ifdef DEBUG
57 #define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
58 #define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
59 #else
60 #define FREQ_DBG(fmt, args...) do {} while (0)
61 #define FREQ_LOG(fmt, args...) do {} while (0)
62 #endif
63 #define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
64
65 static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
66 /*********************************************************/
67 /* additional semantics for "relation" in cpufreq with pm */
68 #define DISABLE_FURTHER_CPUFREQ         0x10
69 #define ENABLE_FURTHER_CPUFREQ          0x20
70 #define MASK_FURTHER_CPUFREQ            0x30
71 /* With 0x00(NOCHANGE), it depends on the previous "further" status */
72 #define CPUFREQ_PRIVATE                 0x100
73 static unsigned int no_cpufreq_access[MAX_CLUSTERS] = { 0 };
74 static unsigned int suspend_freq[MAX_CLUSTERS] = { 816 * 1000, 816 * 1000 };
75 static unsigned int suspend_volt = 1100000;
76 static unsigned int low_battery_freq[MAX_CLUSTERS] = { 600 * 1000,
77         600 * 1000 };
78 static unsigned int low_battery_capacity = 5;
79 static bool is_booting = true;
80 static DEFINE_MUTEX(cpufreq_mutex);
81 static struct dvfs_node *clk_cpu_dvfs_node[MAX_CLUSTERS];
82 static struct dvfs_node *clk_gpu_dvfs_node;
83 static struct dvfs_node *clk_ddr_dvfs_node;
84 static cpumask_var_t cluster_policy_mask[MAX_CLUSTERS];
85
86 #ifdef CONFIG_ROCKCHIP_CPUQUIET
87 static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
88                                                     unsigned int cpu_freq);
89 static struct cpuquiet_governor rockchip_bl_balanced_governor;
90 #endif
91
92 /*******************************************************/
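/*
 * Map a CPU to its cluster index using the physical package id reported by
 * the topology code; negative ids fall back to cluster 0.
 */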
93 static inline int cpu_to_cluster(int cpu)
94 {
95         int id = topology_physical_package_id(cpu);
96         if (id < 0)
97                 id = 0;
98         return id;
99 }
100
101 static unsigned int rockchip_bl_cpufreq_get_rate(unsigned int cpu)
102 {
103         u32 cur_cluster = cpu_to_cluster(cpu);
104
105         if (clk_cpu_dvfs_node[cur_cluster])
106                 return clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
107
108         return 0;
109 }
110
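/*
 * Governors whose name starts with 'o', 'i', 'c' or 'h' (e.g. ondemand,
 * interactive, conservative, hotplug) are treated as dynamic governors that
 * this driver may limit during boot and suspend.
 */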
111 static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
112 {
113         char c = 0;
114
115         if (policy && policy->governor)
116                 c = policy->governor->name[0];
117         return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
118 }
119
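/*
 * Return the highest table frequency that does not exceed max_freq, or
 * max_freq itself if no table entry qualifies.
 */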
120 static unsigned int get_freq_from_table(unsigned int max_freq,
121                                         unsigned int cluster)
122 {
123         unsigned int i;
124         unsigned int target_freq = 0;
125
126         for (i = 0; freq_table[cluster][i].frequency != CPUFREQ_TABLE_END;
127              i++) {
128                 unsigned int freq = freq_table[cluster][i].frequency;
129
130                 if (freq <= max_freq && target_freq < freq)
131                         target_freq = freq;
132         }
133         if (!target_freq)
134                 target_freq = max_freq;
135         return target_freq;
136 }
137
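/*
 * Policy notifier: when a dynamic governor is selected, re-apply the cached
 * dvfs min/max limits; for any other governor, snapshot the current limits
 * so they can be restored later.
 */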
138 static int rockchip_bl_cpufreq_notifier_policy(struct notifier_block *nb,
139                                                unsigned long val,
140                                                void *data)
141 {
142         static unsigned int min_rate = 0, max_rate = -1;
143         struct cpufreq_policy *policy = data;
144         u32 cur_cluster = cpu_to_cluster(policy->cpu);
145
146         if (val != CPUFREQ_ADJUST)
147                 return 0;
148
149         if (cpufreq_is_ondemand(policy)) {
150                 FREQ_DBG("queue work\n");
151                 dvfs_clk_enable_limit(clk_cpu_dvfs_node[cur_cluster],
152                                       min_rate, max_rate);
153         } else {
154                 FREQ_DBG("cancel work\n");
155                 dvfs_clk_get_limit(clk_cpu_dvfs_node[cur_cluster],
156                                    &min_rate, &max_rate);
157         }
158
159         return 0;
160 }
161
162 static struct notifier_block notifier_policy_block = {
163         .notifier_call = rockchip_bl_cpufreq_notifier_policy
164 };
165
166 static int rockchip_bl_cpufreq_verify(struct cpufreq_policy *policy)
167 {
168         u32 cur_cluster = cpu_to_cluster(policy->cpu);
169
170         if (!freq_table[cur_cluster])
171                 return -EINVAL;
172         return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
173 }
174
175 static int clk_node_get_cluster_id(struct clk *clk)
176 {
177         int i;
178
179         for (i = 0; i < MAX_CLUSTERS; i++) {
180                 if (clk_cpu_dvfs_node[i]->clk == clk)
181                         return i;
182         }
183         return 0;
184 }
185
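/*
 * dvfs set_rate callback: wrap the clock change with cpufreq PRECHANGE and
 * POSTCHANGE notifications for the first online CPU of the affected cluster.
 */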
186 static int rockchip_bl_cpufreq_scale_rate_for_dvfs(struct clk *clk,
187                                                    unsigned long rate)
188 {
189         int ret;
190         struct cpufreq_freqs freqs;
191         struct cpufreq_policy *policy;
192         u32 cur_cluster, cpu;
193
194         cur_cluster = clk_node_get_cluster_id(clk);
195         cpu = cpumask_first_and(cluster_policy_mask[cur_cluster],
196                 cpu_online_mask);
197         if (cpu >= nr_cpu_ids)
198                 return 0;
199         policy = cpufreq_cpu_get(cpu);
200         if (!policy)
201                 return 0;
202
203         freqs.new = rate / 1000;
204         freqs.old = clk_get_rate(clk) / 1000;
205
206         cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
207
208         FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
209
210         ret = clk_set_rate(clk, rate);
211
212         freqs.new = clk_get_rate(clk) / 1000;
213
214 #ifdef CONFIG_ROCKCHIP_CPUQUIET
215         rockchip_bl_balanced_cpufreq_transition(cur_cluster, freqs.new);
216 #endif
217
218         /* notifiers */
219         cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
220
221         cpufreq_cpu_put(policy);
222         return ret;
223 }
224
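/*
 * Per-cluster setup: look up the dvfs node, register the set_rate callback,
 * fetch the frequency/voltage table, and choose as suspend frequency the
 * entry with the lowest voltage at or above suspend_volt.
 */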
225 static int cluster_cpus_freq_dvfs_init(u32 cluster_id, char *dvfs_name)
226 {
227         int v = INT_MAX;
228         int i;
229
230         clk_cpu_dvfs_node[cluster_id] = clk_get_dvfs_node(dvfs_name);
231
232         if (!clk_cpu_dvfs_node[cluster_id]) {
233                 FREQ_ERR("%s:cluster_id=%d,get dvfs err\n",
234                          __func__, cluster_id);
235                 return -EINVAL;
236         }
237         dvfs_clk_register_set_rate_callback(
238                 clk_cpu_dvfs_node[cluster_id],
239                 rockchip_bl_cpufreq_scale_rate_for_dvfs);
240         freq_table[cluster_id] =
241                 dvfs_get_freq_volt_table(clk_cpu_dvfs_node[cluster_id]);
242         if (!freq_table[cluster_id]) {
243                 FREQ_ERR("No freq table for cluster %d\n", cluster_id);
244                 return -EINVAL;
245         }
246
247         for (i = 0; freq_table[cluster_id][i].frequency != CPUFREQ_TABLE_END;
248              i++) {
249                 if (freq_table[cluster_id][i].index >= suspend_volt &&
250                     v > freq_table[cluster_id][i].index) {
251                         suspend_freq[cluster_id] =
252                                 freq_table[cluster_id][i].frequency;
253                         v = freq_table[cluster_id][i].index;
254                 }
255         }
256         low_battery_freq[cluster_id] =
257                 get_freq_from_table(low_battery_freq[cluster_id], cluster_id);
258         clk_enable_dvfs(clk_cpu_dvfs_node[cluster_id]);
259         return 0;
260 }
261
262 static int rockchip_bl_cpufreq_init_cpu0(struct cpufreq_policy *policy)
263 {
264         clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
265         if (clk_gpu_dvfs_node)
266                 clk_enable_dvfs(clk_gpu_dvfs_node);
267
268         clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
269         if (clk_ddr_dvfs_node)
270                 clk_enable_dvfs(clk_ddr_dvfs_node);
271
272         cluster_cpus_freq_dvfs_init(B_CLUSTER, "clk_core_b");
273         cluster_cpus_freq_dvfs_init(L_CLUSTER, "clk_core_l");
274
275         cpufreq_register_notifier(&notifier_policy_block,
276                                   CPUFREQ_POLICY_NOTIFIER);
277
278         pr_info("version " VERSION ", suspend freq %d %d MHz\n",
279                 suspend_freq[0] / 1000, suspend_freq[1] / 1000);
280         return 0;
281 }
282
283 static int rockchip_bl_cpufreq_init(struct cpufreq_policy *policy)
284 {
285         static int cpu0_err;
286         u32 cur_cluster = cpu_to_cluster(policy->cpu);
287
288         if (policy->cpu == 0)
289                 cpu0_err = rockchip_bl_cpufreq_init_cpu0(policy);
290         if (cpu0_err)
291                 return cpu0_err;
292
293         /* set freq min max */
294         cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
295         /* sysfs node */
296         cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
297
298         if (cur_cluster < MAX_CLUSTERS) {
299                 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
300                 cpumask_copy(cluster_policy_mask[cur_cluster],
301                              topology_core_cpumask(policy->cpu));
302         }
303
304         policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
305
306         /* make the ondemand governor's default sampling_rate 40000 us */
307         policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;
308
309         return 0;
310 }
311
312 static int rockchip_bl_cpufreq_exit(struct cpufreq_policy *policy)
313 {
314         u32 cur_cluster = cpu_to_cluster(policy->cpu);
315
316         if (policy->cpu == 0) {
317                 cpufreq_unregister_notifier(&notifier_policy_block,
318                                             CPUFREQ_POLICY_NOTIFIER);
319         }
320         cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
321         clk_put_dvfs_node(clk_cpu_dvfs_node[cur_cluster]);
322
323         return 0;
324 }
325
326 static struct freq_attr *rockchip_bl_cpufreq_attr[] = {
327         &cpufreq_freq_attr_scaling_available_freqs,
328         NULL,
329 };
330
331 #ifdef CONFIG_CHARGER_DISPLAY
332 extern int rk_get_system_battery_capacity(void);
333 #else
334 static int rk_get_system_battery_capacity(void)
335 {
336         return 100;
337 }
338 #endif
339
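/*
 * For dynamic governors only: during the first 60 seconds after boot, cap
 * the request at low_battery_freq while the reported battery capacity is at
 * or below low_battery_capacity.
 */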
340 static unsigned int
341 rockchip_bl_cpufreq_scale_limit(unsigned int target_freq,
342                                 struct cpufreq_policy *policy, bool is_private)
343 {
344         bool is_ondemand = cpufreq_is_ondemand(policy);
345         u32 cur_cluster = cpu_to_cluster(policy->cpu);
346
347         if (!is_ondemand)
348                 return target_freq;
349
350         if (is_booting) {
351                 s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
352
353                 if (boottime_ms > 60 * MSEC_PER_SEC) {
354                         is_booting = false;
355                 } else if (target_freq > low_battery_freq[cur_cluster] &&
356                            rk_get_system_battery_capacity() <=
357                            low_battery_capacity) {
358                         target_freq = low_battery_freq[cur_cluster];
359                 }
360         }
361
362         return target_freq;
363 }
364
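/*
 * Target callback: the PM and reboot paths fold DISABLE/ENABLE_FURTHER_CPUFREQ
 * flags into "relation"; while further changes are disabled, requests are
 * rejected.  Otherwise the request is matched against the frequency table,
 * optionally limited, and applied through dvfs_clk_set_rate().
 */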
365 static int rockchip_bl_cpufreq_target(struct cpufreq_policy *policy,
366                                       unsigned int target_freq,
367                                       unsigned int relation)
368 {
369         unsigned int i, new_freq = target_freq, new_rate, cur_rate;
370         int ret = 0;
371         bool is_private;
372         u32 cur_cluster = cpu_to_cluster(policy->cpu);
373
374         if (!freq_table[cur_cluster]) {
375                 FREQ_ERR("no freq table!\n");
376                 return -EINVAL;
377         }
378
379         mutex_lock(&cpufreq_mutex);
380
381         is_private = relation & CPUFREQ_PRIVATE;
382         relation &= ~CPUFREQ_PRIVATE;
383
384         if ((relation & ENABLE_FURTHER_CPUFREQ) &&
385             no_cpufreq_access[cur_cluster])
386                 no_cpufreq_access[cur_cluster]--;
387         if (no_cpufreq_access[cur_cluster]) {
388                 FREQ_LOG("denied access to %s as it is disabled temporarily\n",
389                          __func__);
390                 ret = -EINVAL;
391                 goto out;
392         }
393         if (relation & DISABLE_FURTHER_CPUFREQ)
394                 no_cpufreq_access[cur_cluster]++;
395         relation &= ~MASK_FURTHER_CPUFREQ;
396
397         ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
398                                              target_freq, relation, &i);
399         if (ret) {
400                 FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
401                 goto out;
402         }
403         new_freq = freq_table[cur_cluster][i].frequency;
404         if (!no_cpufreq_access[cur_cluster])
405                 new_freq =
406                     rockchip_bl_cpufreq_scale_limit(new_freq, policy,
407                                                     is_private);
408
409         new_rate = new_freq * 1000;
410         cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[cur_cluster]);
411         FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq,
412                  new_freq, cur_rate / 1000);
413         if (new_rate == cur_rate)
414                 goto out;
415         ret = dvfs_clk_set_rate(clk_cpu_dvfs_node[cur_cluster], new_rate);
416
417 out:
418         FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
419         mutex_unlock(&cpufreq_mutex);
420         return ret;
421 }
422
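/*
 * PM notifier: on PM_SUSPEND_PREPARE, pin each cluster to suspend_freq and
 * disable further changes, returning NOTIFY_BAD if that transition fails; on
 * PM_POST_SUSPEND/PM_POST_RESTORE, re-enable scaling.  policy->cur is bumped
 * so cpufreq_driver_target() cannot short-circuit the call.
 */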
423 static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
424                                                  unsigned long event, void *ptr)
425 {
426         int ret = NOTIFY_DONE;
427         int i;
428         struct cpufreq_policy *policy;
429         u32 cpu;
430
431         for (i = 0; i < MAX_CLUSTERS; i++) {
432                 cpu = cpumask_first_and(cluster_policy_mask[i],
433                         cpu_online_mask);
434                 if (cpu >= nr_cpu_ids)
435                         continue;
436                 policy = cpufreq_cpu_get(cpu);
437                 if (!policy)
438                         continue;
439
440                 if (!cpufreq_is_ondemand(policy))
441                         goto out;
442
443                 switch (event) {
444                 case PM_SUSPEND_PREPARE:
445                         policy->cur++;
446                         ret = cpufreq_driver_target(policy, suspend_freq[i],
447                                                     DISABLE_FURTHER_CPUFREQ |
448                                                     CPUFREQ_RELATION_H);
449                         if (ret < 0) {
450                                 ret = NOTIFY_BAD;
451                                 goto out;
452                         }
453                         ret = NOTIFY_OK;
454                         break;
455                 case PM_POST_RESTORE:
456                 case PM_POST_SUSPEND:
 457                         /* If target_freq == policy->cur then
 458                            cpufreq_driver_target() returns early and our
 459                            target callback is never invoked, so the
 460                            ENABLE_FURTHER_CPUFREQ flag would never take
 461                            effect.  Bump policy->cur to force the call. */
462                         policy->cur++;
463                         cpufreq_driver_target(policy, suspend_freq[i],
464                                               ENABLE_FURTHER_CPUFREQ |
465                                               CPUFREQ_RELATION_H);
466                         ret = NOTIFY_OK;
467                         break;
468                 }
469 out:
470                 cpufreq_cpu_put(policy);
471         }
472
473         return ret;
474 }
475
476 static struct notifier_block rockchip_bl_cpufreq_pm_notifier = {
477         .notifier_call = rockchip_bl_cpufreq_pm_notifier_event,
478 };
479
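/*
 * Reboot path: drop the temperature limit and clamp both clusters to their
 * suspend frequency, then log the resulting rates and the vdd_arm voltage.
 */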
480 static int rockchip_bl_cpufreq_reboot_limit_freq(void)
481 {
482         struct regulator *regulator;
483         int volt = 0;
484         u32 rate;
485         int i;
486
487         dvfs_disable_temp_limit();
488
489         for (i = 0; i < MAX_CLUSTERS; i++) {
490                 dvfs_clk_enable_limit(clk_cpu_dvfs_node[i],
491                                       1000 * suspend_freq[i],
492                                       1000 * suspend_freq[i]);
493                 rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[i]);
494         }
495
496         regulator = dvfs_get_regulator("vdd_arm");
497         if (regulator)
498                 volt = regulator_get_voltage(regulator);
499         else
500                 pr_info("get arm regulator failed\n");
501         pr_info("reboot set cluster0 rate=%lu, cluster1 rate=%lu, volt=%d\n",
502                 dvfs_clk_get_rate(clk_cpu_dvfs_node[0]),
503                 dvfs_clk_get_rate(clk_cpu_dvfs_node[1]), volt);
504
505         return 0;
506 }
507
508 static int rockchip_bl_cpufreq_reboot_notifier_event(struct notifier_block
509                                                      *this, unsigned long event,
510                                                      void *ptr)
511 {
512         rockchip_set_system_status(SYS_STATUS_REBOOT);
513         rockchip_bl_cpufreq_reboot_limit_freq();
514
515         return NOTIFY_OK;
516 };
517
518 static struct notifier_block rockchip_bl_cpufreq_reboot_notifier = {
519         .notifier_call = rockchip_bl_cpufreq_reboot_notifier_event,
520 };
521
522 static struct cpufreq_driver rockchip_bl_cpufreq_driver = {
523         .flags = CPUFREQ_CONST_LOOPS,
524         .verify = rockchip_bl_cpufreq_verify,
525         .target = rockchip_bl_cpufreq_target,
526         .get = rockchip_bl_cpufreq_get_rate,
527         .init = rockchip_bl_cpufreq_init,
528         .exit = rockchip_bl_cpufreq_exit,
529         .name = "rockchip-bl",
530         .have_governor_per_policy = true,
531         .attr = rockchip_bl_cpufreq_attr,
532 };
533
534 static const struct of_device_id rockchip_bl_cpufreq_match[] = {
535         {
536                 .compatible = "rockchip,rk3368-cpufreq",
537         },
538         {},
539 };
540 MODULE_DEVICE_TABLE(of, rockchip_bl_cpufreq_match);
541
542 static int __init rockchip_bl_cpufreq_probe(struct platform_device *pdev)
543 {
544         int ret, i;
545
546         for (i = 0; i < MAX_CLUSTERS; i++) {
547                 if (!alloc_cpumask_var(&cluster_policy_mask[i], GFP_KERNEL))
548                         return -ENOMEM;
549         }
550
551         register_reboot_notifier(&rockchip_bl_cpufreq_reboot_notifier);
552         register_pm_notifier(&rockchip_bl_cpufreq_pm_notifier);
553
554         ret = cpufreq_register_driver(&rockchip_bl_cpufreq_driver);
555
556 #ifdef CONFIG_ROCKCHIP_CPUQUIET
557         ret = cpuquiet_register_governor(&rockchip_bl_balanced_governor);
558 #endif
559
560         return ret;
561 }
562
563 static int rockchip_bl_cpufreq_remove(struct platform_device *pdev)
564 {
565         int i;
566
567         for (i = 0; i < MAX_CLUSTERS; i++)
568                 free_cpumask_var(cluster_policy_mask[i]);
569         cpufreq_unregister_driver(&rockchip_bl_cpufreq_driver);
570         return 0;
571 }
572
573 static struct platform_driver rockchip_bl_cpufreq_platdrv = {
574         .driver = {
575                 .name   = "rockchip-bl-cpufreq",
576                 .owner  = THIS_MODULE,
577                 .of_match_table = rockchip_bl_cpufreq_match,
578         },
579         .remove         = rockchip_bl_cpufreq_remove,
580 };
581
582 module_platform_driver_probe(rockchip_bl_cpufreq_platdrv,
583                              rockchip_bl_cpufreq_probe);
584
585 MODULE_AUTHOR("Xiao Feng <xf@rock-chips.com>");
586 MODULE_LICENSE("GPL");
587
588 #ifdef CONFIG_ROCKCHIP_CPUQUIET
589 extern struct cpumask hmp_slow_cpu_mask;
590
591 enum cpu_speed_balance {
592         CPU_SPEED_BALANCED,
593         CPU_SPEED_BIASED,
594         CPU_SPEED_SKEWED,
595         CPU_SPEED_BOOST,
596 };
597
598 enum balanced_state {
599         IDLE,
600         DOWN,
601         UP,
602 };
603
604 struct idle_info {
605         u64 idle_last_us;
606         u64 idle_current_us;
607 };
608
609 static u64 idleinfo_timestamp_us;
610 static u64 idleinfo_last_timestamp_us;
611 static DEFINE_PER_CPU(struct idle_info, idleinfo);
612 static DEFINE_PER_CPU(unsigned int, cpu_load);
613
614 static struct timer_list load_timer;
615 static bool load_timer_active;
616
617 /* configurable parameters */
618 static unsigned int  balance_level = 60;
619 static unsigned int  idle_bottom_freq[MAX_CLUSTERS];
620 static unsigned int  idle_top_freq[MAX_CLUSTERS];
621 static unsigned int  cpu_freq[MAX_CLUSTERS];
622 static unsigned long up_delay_jiffies;
623 static unsigned long down_delay_jiffies;
624 static unsigned long last_change_time_jiffies;
625 static unsigned int  load_sample_rate_jiffies = 20 / (MSEC_PER_SEC / HZ);
626 static unsigned int  little_high_load = 80;
627 static unsigned int  little_low_load = 20;
628 static unsigned int  big_low_load = 20;
629 static struct workqueue_struct *rockchip_bl_balanced_wq;
630 static struct delayed_work rockchip_bl_balanced_work;
631 static enum balanced_state rockchip_bl_balanced_state;
632 static struct kobject *rockchip_bl_balanced_kobj;
633 static DEFINE_MUTEX(rockchip_bl_balanced_lock);
634 static bool rockchip_bl_balanced_enable;
635
636 #define GOVERNOR_NAME "bl_balanced"
637
638 static u64 get_idle_us(int cpu)
639 {
640         return get_cpu_idle_time(cpu, NULL, 1 /* io_busy */);
641 }
642
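/*
 * Load sampling timer: for every present CPU, convert the growth of its idle
 * time over the elapsed window into a 0-100 load value, then re-arm the
 * timer.
 */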
643 static void calculate_load_timer(unsigned long data)
644 {
645         int i;
646         u64 elapsed_time;
647
648         if (!load_timer_active)
649                 return;
650
651         idleinfo_last_timestamp_us = idleinfo_timestamp_us;
652         idleinfo_timestamp_us = ktime_to_us(ktime_get());
653         elapsed_time = idleinfo_timestamp_us - idleinfo_last_timestamp_us;
654
655         for_each_present_cpu(i) {
656                 struct idle_info *iinfo = &per_cpu(idleinfo, i);
657                 unsigned int *load = &per_cpu(cpu_load, i);
658                 u64 idle_time;
659
660                 iinfo->idle_last_us = iinfo->idle_current_us;
661                 iinfo->idle_current_us = get_idle_us(i);
662
663                 idle_time = iinfo->idle_current_us - iinfo->idle_last_us;
664                 idle_time *= 100;
665                 do_div(idle_time, elapsed_time);
666                 if (idle_time > 100)
667                         idle_time = 100;
668                 *load = 100 - idle_time;
669         }
670         mod_timer(&load_timer, jiffies + load_sample_rate_jiffies);
671 }
672
673 static void start_load_timer(void)
674 {
675         int i;
676
677         if (load_timer_active)
678                 return;
679
680         idleinfo_timestamp_us = ktime_to_us(ktime_get());
681         for_each_present_cpu(i) {
682                 struct idle_info *iinfo = &per_cpu(idleinfo, i);
683
684                 iinfo->idle_current_us = get_idle_us(i);
685         }
686         mod_timer(&load_timer, jiffies + load_sample_rate_jiffies);
687
688         load_timer_active = true;
689 }
690
691 static void stop_load_timer(void)
692 {
693         if (!load_timer_active)
694                 return;
695
696         load_timer_active = false;
697         del_timer(&load_timer);
698 }
699
700 static unsigned int get_slowest_cpu(void)
701 {
702         unsigned int cpu = nr_cpu_ids;
703         unsigned long minload = ULONG_MAX;
704         int i;
705
706         for_each_online_cpu(i) {
707                 unsigned int load = per_cpu(cpu_load, i);
708
709                 if ((i > 0) && (minload >= load)) {
710                         cpu = i;
711                         minload = load;
712                 }
713         }
714
715         return cpu;
716 }
717
718 static unsigned int get_offline_big_cpu(void)
719 {
720         struct cpumask big, offline_big;
721
722         cpumask_andnot(&big, cpu_present_mask, &hmp_slow_cpu_mask);
723         cpumask_andnot(&offline_big, &big, cpu_online_mask);
724         return cpumask_first(&offline_big);
725 }
726
727 static unsigned int cpu_highest_speed(void)
728 {
729         unsigned int maxload = 0;
730         int i;
731
732         for_each_online_cpu(i) {
733                 unsigned int load = per_cpu(cpu_load, i);
734
735                 maxload = max(maxload, load);
736         }
737
738         return maxload;
739 }
740
741 static unsigned int count_slow_cpus(unsigned int limit)
742 {
743         unsigned int cnt = 0;
744         int i;
745
746         for_each_online_cpu(i) {
747                 unsigned int load = per_cpu(cpu_load, i);
748
749                 if (load <= limit)
750                         cnt++;
751         }
752
753         return cnt;
754 }
755
756 #define NR_FSHIFT       2
757
758 static unsigned int rt_profile[NR_CPUS] = {
759 /*      1,  2,  3,  4,  5,  6,  7,  8 - on-line cpus target */
760         5,  9, 10, 11, 12, 13, 14,  UINT_MAX
761 };
762
763 static unsigned int nr_run_hysteresis = 2;      /* 0.5 thread */
764 static unsigned int nr_run_last;
765
766 struct runnables_avg_sample {
767         u64 previous_integral;
768         unsigned int avg;
769         bool integral_sampled;
770         u64 prev_timestamp;     /* ns */
771 };
772
773 static DEFINE_PER_CPU(struct runnables_avg_sample, avg_nr_sample);
774
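/*
 * Sum the per-CPU average number of runnable tasks over the last sample
 * window, derived from the scheduler's nr_running integral.  The result is
 * in fixed point; callers shift by FSHIFT - NR_FSHIFT when comparing it
 * against thread counts.
 */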
775 static unsigned int get_avg_nr_runnables(void)
776 {
777         unsigned int i, sum = 0;
778         struct runnables_avg_sample *sample;
779         u64 integral, old_integral, delta_integral, delta_time, cur_time;
780
781         cur_time = ktime_to_ns(ktime_get());
782
783         for_each_online_cpu(i) {
784                 sample = &per_cpu(avg_nr_sample, i);
785                 integral = nr_running_integral(i);
786                 old_integral = sample->previous_integral;
787                 sample->previous_integral = integral;
788                 delta_time = cur_time - sample->prev_timestamp;
789                 sample->prev_timestamp = cur_time;
790
791                 if (!sample->integral_sampled) {
792                         sample->integral_sampled = true;
793                         /* First sample to initialize prev_integral, skip
794                          * avg calculation
795                          */
796                         continue;
797                 }
798
799                 if (integral < old_integral) {
800                         /* Overflow */
801                         delta_integral = (ULLONG_MAX - old_integral) + integral;
802                 } else {
803                         delta_integral = integral - old_integral;
804                 }
805
806                 /* Calculate average for the previous sample window */
807                 do_div(delta_integral, delta_time);
808                 sample->avg = delta_integral;
809                 sum += sample->avg;
810         }
811
812         return sum;
813 }
814
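/*
 * Decide whether an offline big CPU should be woken: the little cluster must
 * be at or above its idle_top frequency and some little CPU must be loaded
 * above little_high_load by what looks like a single heavy thread, with no
 * nearly idle big CPU already online.
 */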
815 static bool rockchip_bl_balanced_speed_boost(void)
816 {
817         unsigned int cpu;
818         struct cpumask online_little;
819         unsigned int big_cpu;
820         bool has_low_load_little_cpu = false;
821
822         if (cpu_freq[L_CLUSTER] < idle_top_freq[L_CLUSTER])
823                 return false;
824
825         cpumask_and(&online_little, cpu_online_mask, &hmp_slow_cpu_mask);
826
827         for_each_cpu(cpu, &online_little) {
828                 if (per_cpu(cpu_load, cpu) < little_low_load) {
829                         has_low_load_little_cpu = true;
830                         break;
831                 }
832         }
833
834         for_each_cpu(cpu, &online_little) {
835                 unsigned int load;
836                 unsigned int avg;
837                 struct cpumask online_big;
838                 bool has_low_load_big_cpu;
839
840                 load = per_cpu(cpu_load, cpu);
841                 /* skip low load cpu */
842                 if (load < little_high_load)
843                         continue;
844
845                 avg = per_cpu(avg_nr_sample, cpu).avg;
 846                 /*
 847                  * Skip this CPU if a low-load little CPU exists and its
 848                  * high load comes from running many tasks; those tasks
 849                  * can be migrated to the low-load CPU instead.
 850                  */
851                 if (has_low_load_little_cpu &&
852                     (avg >> (FSHIFT - NR_FSHIFT)) >= 4)
853                         continue;
854
 855                 /*
 856                  * Found a CPU kept busy by a single thread; stop if
 857                  * there is no offline big CPU to bring up.
 858                  */
859                 if (get_offline_big_cpu() >= nr_cpu_ids)
860                         break;
861
862                 cpumask_andnot(&online_big,
863                                cpu_online_mask, &hmp_slow_cpu_mask);
864
865                 has_low_load_big_cpu = false;
866                 for_each_cpu(big_cpu, &online_big) {
867                         unsigned int big_load;
868
869                         big_load = per_cpu(cpu_load, big_cpu);
870                         if (big_load < big_low_load) {
871                                 has_low_load_big_cpu = true;
872                                 break;
873                         }
874                 }
 875                 /* if a big CPU is already near idle, don't bring up another */
876                 if (has_low_load_big_cpu)
877                         break;
878
879                 return true;
880         }
881
882         return false;
883 }
884
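/*
 * Classify the current load picture as BOOST, BALANCED, BIASED or SKEWED
 * using per-CPU load relative to the fastest CPU, the runnable-task average
 * matched against rt_profile, and the PM QoS min/max online-CPU requests.
 */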
885 static enum cpu_speed_balance rockchip_bl_balanced_speed_balance(void)
886 {
887         unsigned long highest_speed = cpu_highest_speed();
888         unsigned long balanced_speed = highest_speed * balance_level / 100;
889         unsigned long skewed_speed = balanced_speed / 2;
890         unsigned int nr_cpus = num_online_cpus();
891         unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
892         unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
893         unsigned int avg_nr_run = get_avg_nr_runnables();
894         unsigned int nr_run;
895
896         if (max_cpus > nr_cpu_ids || max_cpus == 0)
897                 max_cpus = nr_cpu_ids;
898
899         if (rockchip_bl_balanced_speed_boost())
900                 return CPU_SPEED_BOOST;
901
902         /* balanced: freq targets for all CPUs are above 60% of highest speed
903            biased: freq target for at least one CPU is below 60% threshold
904            skewed: freq targets for at least 2 CPUs are below 30% threshold */
905         for (nr_run = 1; nr_run < ARRAY_SIZE(rt_profile); nr_run++) {
906                 unsigned int nr_threshold = rt_profile[nr_run - 1];
907
908                 if (nr_run_last <= nr_run)
909                         nr_threshold += nr_run_hysteresis;
910                 if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
911                         break;
912         }
913         nr_run_last = nr_run;
914
915         if ((count_slow_cpus(skewed_speed) >= 2 ||
916              nr_run < nr_cpus ||
917              (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
918               cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) ||
919              nr_cpus > max_cpus) &&
920             nr_cpus > min_cpus)
921                 return CPU_SPEED_SKEWED;
922
923         if ((count_slow_cpus(balanced_speed) >= 1 ||
924              nr_run <= nr_cpus ||
925              (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
926               cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) ||
927              nr_cpus == max_cpus) &&
928             nr_cpus >= min_cpus)
929                 return CPU_SPEED_BIASED;
930
931         return CPU_SPEED_BALANCED;
932 }
933
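/*
 * Balance work: in the DOWN state take the slowest non-boot CPU offline; in
 * the UP state consult the speed classifier to bring a CPU online (a big one
 * on BOOST) or take the slowest one offline, honouring down_delay between
 * removals.
 */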
934 static void rockchip_bl_balanced_work_func(struct work_struct *work)
935 {
936         bool up = false;
937         unsigned int cpu = nr_cpu_ids;
938         unsigned long now = jiffies;
939         struct workqueue_struct *wq = rockchip_bl_balanced_wq;
940         struct delayed_work *dwork = to_delayed_work(work);
941         enum cpu_speed_balance balance;
942
943         mutex_lock(&rockchip_bl_balanced_lock);
944
945         if (!rockchip_bl_balanced_enable)
946                 goto out;
947
948         switch (rockchip_bl_balanced_state) {
949         case IDLE:
950                 break;
951         case DOWN:
952                 cpu = get_slowest_cpu();
953                 if (cpu < nr_cpu_ids) {
954                         up = false;
955                         queue_delayed_work(wq, dwork, up_delay_jiffies);
956                 } else {
957                         stop_load_timer();
958                 }
959                 break;
960         case UP:
961                 balance = rockchip_bl_balanced_speed_balance();
962                 switch (balance) {
963                 case CPU_SPEED_BOOST:
964                         cpu = get_offline_big_cpu();
965                         if (cpu < nr_cpu_ids)
966                                 up = true;
967                         break;
968                 /* cpu speed is up and balanced - one more on-line */
969                 case CPU_SPEED_BALANCED:
970                         cpu = cpumask_next_zero(0, cpu_online_mask);
971                         if (cpu < nr_cpu_ids)
972                                 up = true;
973                         break;
974                 /* cpu speed is up, but skewed - remove one core */
975                 case CPU_SPEED_SKEWED:
976                         cpu = get_slowest_cpu();
977                         if (cpu < nr_cpu_ids)
978                                 up = false;
979                         break;
980                 /* cpu speed is up, but under-utilized - do nothing */
981                 case CPU_SPEED_BIASED:
982                 default:
983                         break;
984                 }
985                 queue_delayed_work(wq, dwork, up_delay_jiffies);
986                 break;
987         default:
988                 pr_err("%s: invalid cpuquiet governor state %d\n",
989                        __func__, rockchip_bl_balanced_state);
990         }
991
992         if (!up && ((now - last_change_time_jiffies) < down_delay_jiffies))
993                 cpu = nr_cpu_ids;
994
995         if (cpu < nr_cpu_ids) {
996                 last_change_time_jiffies = now;
997                 if (up)
998                         cpuquiet_wake_cpu(cpu, false);
999                 else
1000                         cpuquiet_quiesence_cpu(cpu, false);
1001         }
1002
1003 out:
1004         mutex_unlock(&rockchip_bl_balanced_lock);
1005 }
1006
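/*
 * Called on every cluster frequency change: record the new frequency and
 * drive the IDLE/UP/DOWN state machine, queueing the balance work and
 * starting the load timer when the idle_top/idle_bottom thresholds are
 * crossed.
 */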
1007 static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
1008                                                     unsigned int new_cpu_freq)
1009 {
1010         struct workqueue_struct *wq;
1011         struct delayed_work *dwork;
1012
1013         mutex_lock(&rockchip_bl_balanced_lock);
1014
1015         if (!rockchip_bl_balanced_enable)
1016                 goto out;
1017
1018         wq = rockchip_bl_balanced_wq;
1019         dwork = &rockchip_bl_balanced_work;
1020         cpu_freq[cluster] = new_cpu_freq;
1021
1022         switch (rockchip_bl_balanced_state) {
1023         case IDLE:
1024                 if (cpu_freq[B_CLUSTER] >= idle_top_freq[B_CLUSTER] ||
1025                     cpu_freq[L_CLUSTER] >= idle_top_freq[L_CLUSTER]) {
1026                         rockchip_bl_balanced_state = UP;
1027                         queue_delayed_work(wq, dwork, up_delay_jiffies);
1028                         start_load_timer();
1029                 } else if (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
1030                            cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) {
1031                         rockchip_bl_balanced_state = DOWN;
1032                         queue_delayed_work(wq, dwork, down_delay_jiffies);
1033                         start_load_timer();
1034                 }
1035                 break;
1036         case DOWN:
1037                 if (cpu_freq[B_CLUSTER] >= idle_top_freq[B_CLUSTER] ||
1038                     cpu_freq[L_CLUSTER] >= idle_top_freq[L_CLUSTER]) {
1039                         rockchip_bl_balanced_state = UP;
1040                         queue_delayed_work(wq, dwork, up_delay_jiffies);
1041                         start_load_timer();
1042                 }
1043                 break;
1044         case UP:
1045                 if (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
1046                     cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) {
1047                         rockchip_bl_balanced_state = DOWN;
1048                         queue_delayed_work(wq, dwork, up_delay_jiffies);
1049                         start_load_timer();
1050                 }
1051                 break;
1052         default:
1053                 pr_err("%s: invalid cpuquiet governor state %d\n",
1054                        __func__, rockchip_bl_balanced_state);
1055         }
1056
1057 out:
1058         mutex_unlock(&rockchip_bl_balanced_lock);
1059 }
1060
1061 static void delay_callback(struct cpuquiet_attribute *attr)
1062 {
1063         unsigned long val;
1064
1065         if (attr) {
1066                 val = (*((unsigned long *)(attr->param)));
1067                 (*((unsigned long *)(attr->param))) = msecs_to_jiffies(val);
1068         }
1069 }
1070
1071 #define CPQ_BASIC_ATTRIBUTE_B(_name, _mode, _type) \
1072         static struct cpuquiet_attribute _name ## _b_attr = {           \
1073                 .attr = {.name = __stringify(_name ## _b), .mode = _mode },\
1074                 .show = show_ ## _type ## _attribute,                   \
1075                 .store = store_ ## _type ## _attribute,                 \
1076                 .param = &_name[B_CLUSTER],                             \
1077 }
1078 #define CPQ_BASIC_ATTRIBUTE_L(_name, _mode, _type) \
1079         static struct cpuquiet_attribute _name ## _l_attr = {           \
1080                 .attr = {.name = __stringify(_name ## _l), .mode = _mode },\
1081                 .show = show_ ## _type ## _attribute,                   \
1082                 .store = store_ ## _type ## _attribute,                 \
1083                 .param = &_name[L_CLUSTER],                             \
1084 }
1085 CPQ_BASIC_ATTRIBUTE(balance_level, 0644, uint);
1086 CPQ_BASIC_ATTRIBUTE_B(idle_bottom_freq, 0644, uint);
1087 CPQ_BASIC_ATTRIBUTE_L(idle_bottom_freq, 0644, uint);
1088 CPQ_BASIC_ATTRIBUTE_B(idle_top_freq, 0644, uint);
1089 CPQ_BASIC_ATTRIBUTE_L(idle_top_freq, 0644, uint);
1090 CPQ_BASIC_ATTRIBUTE(load_sample_rate_jiffies, 0644, uint);
1091 CPQ_BASIC_ATTRIBUTE(nr_run_hysteresis, 0644, uint);
1092 CPQ_BASIC_ATTRIBUTE(little_high_load, 0644, uint);
1093 CPQ_BASIC_ATTRIBUTE(little_low_load, 0644, uint);
1094 CPQ_BASIC_ATTRIBUTE(big_low_load, 0644, uint);
1095 CPQ_ATTRIBUTE(up_delay_jiffies, 0644, ulong, delay_callback);
1096 CPQ_ATTRIBUTE(down_delay_jiffies, 0644, ulong, delay_callback);
1097
1098 #define MAX_BYTES 100
1099
1100 static ssize_t show_rt_profile(struct cpuquiet_attribute *attr, char *buf)
1101 {
1102         char buffer[MAX_BYTES];
1103         unsigned int i;
1104         int size = 0;
1105
1106         buffer[0] = 0;
1107         for (i = 0; i < ARRAY_SIZE(rt_profile); i++) {
1108                 size += snprintf(buffer + size, sizeof(buffer) - size,
1109                                 "%u ", rt_profile[i]);
1110         }
1111         return snprintf(buf, sizeof(buffer), "%s\n", buffer);
1112 }
1113
1114 static ssize_t store_rt_profile(struct cpuquiet_attribute *attr,
1115                                 const char *buf, size_t count)
1116 {
1117         int ret, i = 0;
1118         char *val, *str, input[MAX_BYTES];
1119         unsigned int profile[ARRAY_SIZE(rt_profile)];
1120
1121         if (!count || count >= MAX_BYTES)
1122                 return -EINVAL;
1123         strncpy(input, buf, count);
1124         input[count] = '\0';
1125         str = input;
1126         memcpy(profile, rt_profile, sizeof(rt_profile));
1127         while ((val = strsep(&str, " ")) != NULL) {
1128                 if (*val == '\0')
1129                         continue;
1130                 if (i == ARRAY_SIZE(rt_profile) - 1)
1131                         break;
1132                 ret = kstrtouint(val, 10, &profile[i]);
1133                 if (ret)
1134                         return -EINVAL;
1135                 i++;
1136         }
1137
1138         memcpy(rt_profile, profile, sizeof(profile));
1139
1140         return count;
1141 }
1142 CPQ_ATTRIBUTE_CUSTOM(rt_profile, 0644,
1143                      show_rt_profile, store_rt_profile);
1144
1145 static struct attribute *rockchip_bl_balanced_attributes[] = {
1146         &balance_level_attr.attr,
1147         &idle_bottom_freq_b_attr.attr,
1148         &idle_bottom_freq_l_attr.attr,
1149         &idle_top_freq_b_attr.attr,
1150         &idle_top_freq_l_attr.attr,
1151         &up_delay_jiffies_attr.attr,
1152         &down_delay_jiffies_attr.attr,
1153         &load_sample_rate_jiffies_attr.attr,
1154         &nr_run_hysteresis_attr.attr,
1155         &rt_profile_attr.attr,
1156         &little_high_load_attr.attr,
1157         &little_low_load_attr.attr,
1158         &big_low_load_attr.attr,
1159         NULL,
1160 };
1161
1162 static const struct sysfs_ops rockchip_bl_balanced_sysfs_ops = {
1163         .show = cpuquiet_auto_sysfs_show,
1164         .store = cpuquiet_auto_sysfs_store,
1165 };
1166
1167 static struct kobj_type rockchip_bl_balanced_ktype = {
1168         .sysfs_ops = &rockchip_bl_balanced_sysfs_ops,
1169         .default_attrs = rockchip_bl_balanced_attributes,
1170 };
1171
1172 static int rockchip_bl_balanced_sysfs(void)
1173 {
1174         int err;
1175         struct kobject *kobj;
1176
1177         kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
1178
1179         if (!kobj)
1180                 return -ENOMEM;
1181
1182         err = cpuquiet_kobject_init(kobj, &rockchip_bl_balanced_ktype,
1183                                     GOVERNOR_NAME);
1184
1185         if (err)
1186                 kfree(kobj);
1187
1188         rockchip_bl_balanced_kobj = kobj;
1189
1190         return err;
1191 }
1192
1193 static void rockchip_bl_balanced_stop(void)
1194 {
1195         mutex_lock(&rockchip_bl_balanced_lock);
1196
1197         rockchip_bl_balanced_enable = false;
1198         /* now we can force the governor to be idle */
1199         rockchip_bl_balanced_state = IDLE;
1200
1201         mutex_unlock(&rockchip_bl_balanced_lock);
1202
1203         cancel_delayed_work_sync(&rockchip_bl_balanced_work);
1204
1205         destroy_workqueue(rockchip_bl_balanced_wq);
1206         rockchip_bl_balanced_wq = NULL;
1207         del_timer_sync(&load_timer);
1208
1209         kobject_put(rockchip_bl_balanced_kobj);
1210         kfree(rockchip_bl_balanced_kobj);
1211         rockchip_bl_balanced_kobj = NULL;
1212 }
1213
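/*
 * Governor start: create the sysfs node, derive idle_top/idle_bottom
 * frequencies from the middle of each cluster's frequency table, set up the
 * work queue and load timer, and kick the state machine with the current
 * frequency.
 */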
1214 static int rockchip_bl_balanced_start(void)
1215 {
1216         int err, count, cluster;
1217         struct cpufreq_frequency_table *table;
1218         unsigned int initial_freq;
1219
1220         err = rockchip_bl_balanced_sysfs();
1221         if (err)
1222                 return err;
1223
1224         up_delay_jiffies = msecs_to_jiffies(100);
1225         down_delay_jiffies = msecs_to_jiffies(2000);
1226
1227         for (cluster = 0; cluster < MAX_CLUSTERS; cluster++) {
1228                 table = freq_table[cluster];
1229                 if (!table)
1230                         return -EINVAL;
1231
1232                 for (count = 0; table[count].frequency != CPUFREQ_TABLE_END;
1233                      count++)
1234                         ;
1235
1236                 if (count < 4)
1237                         return -EINVAL;
1238
1239                 idle_top_freq[cluster] = table[(count / 2) - 1].frequency;
1240                 idle_bottom_freq[cluster] = table[(count / 2) - 2].frequency;
1241         }
1242
1243         rockchip_bl_balanced_wq
1244                 = alloc_workqueue(GOVERNOR_NAME, WQ_UNBOUND | WQ_FREEZABLE, 1);
1245         if (!rockchip_bl_balanced_wq)
1246                 return -ENOMEM;
1247
1248         INIT_DELAYED_WORK(&rockchip_bl_balanced_work,
1249                           rockchip_bl_balanced_work_func);
1250
1251         init_timer(&load_timer);
1252         load_timer.function = calculate_load_timer;
1253
1254         mutex_lock(&rockchip_bl_balanced_lock);
1255         rockchip_bl_balanced_enable = true;
1256         if (clk_cpu_dvfs_node[L_CLUSTER])
1257                 cpu_freq[L_CLUSTER] =
1258                         clk_get_rate(clk_cpu_dvfs_node[L_CLUSTER]->clk) / 1000;
1259         if (clk_cpu_dvfs_node[B_CLUSTER])
1260                 cpu_freq[B_CLUSTER] =
1261                         clk_get_rate(clk_cpu_dvfs_node[B_CLUSTER]->clk) / 1000;
1262         mutex_unlock(&rockchip_bl_balanced_lock);
1263
1264         /* Kick start the state machine */
1265         initial_freq = cpufreq_get(0);
1266         if (initial_freq)
1267                 rockchip_bl_balanced_cpufreq_transition(L_CLUSTER,
1268                                                         initial_freq);
1269
1270         return 0;
1271 }
1272
1273 static struct cpuquiet_governor rockchip_bl_balanced_governor = {
1274         .name           = GOVERNOR_NAME,
1275         .start          = rockchip_bl_balanced_start,
1276         .stop           = rockchip_bl_balanced_stop,
1277         .owner          = THIS_MODULE,
1278 };
1279 #endif