1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7  *
8  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9  *      Added handling for CPU hotplug
10  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11  *      Fix handling for CPU hotplug -- affected CPUs
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
33
34 static LIST_HEAD(cpufreq_policy_list);
35
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
37 {
38         return cpumask_empty(policy->cpus);
39 }
40
41 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
42 {
43         return active == !policy_is_inactive(policy);
44 }
45
46 /* Finds next active/inactive policy */
47 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
48                                           bool active)
49 {
50         do {
51                 policy = list_next_entry(policy, policy_list);
52
53                 /* No more policies in the list */
54                 if (&policy->policy_list == &cpufreq_policy_list)
55                         return NULL;
56         } while (!suitable_policy(policy, active));
57
58         return policy;
59 }
60
61 static struct cpufreq_policy *first_policy(bool active)
62 {
63         struct cpufreq_policy *policy;
64
65         /* No policies in the list */
66         if (list_empty(&cpufreq_policy_list))
67                 return NULL;
68
69         policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
70                                   policy_list);
71
72         if (!suitable_policy(policy, active))
73                 policy = next_policy(policy, active);
74
75         return policy;
76 }
77
78 /* Macros to iterate over CPU policies */
79 #define for_each_suitable_policy(__policy, __active)    \
80         for (__policy = first_policy(__active);         \
81              __policy;                                  \
82              __policy = next_policy(__policy, __active))
83
84 #define for_each_active_policy(__policy)                \
85         for_each_suitable_policy(__policy, true)
86 #define for_each_inactive_policy(__policy)              \
87         for_each_suitable_policy(__policy, false)
88
89 #define for_each_policy(__policy)                       \
90         list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
91
92 /* Iterate over governors */
93 static LIST_HEAD(cpufreq_governor_list);
94 #define for_each_governor(__governor)                           \
95         list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
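
/*
 * Illustrative usage (a sketch): cpufreq_suspend() and cpufreq_resume()
 * below walk the active policies with these macros, roughly like:
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("active policy of CPU%u\n", policy->cpu);
 */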
96
97 /**
98  * The "cpufreq driver" - the arch- or hardware-dependent low
99  * level driver of CPUFreq support, and its read-write lock. This lock
100  * also protects the per-CPU cpufreq_cpu_data pointers.
101  */
102 static struct cpufreq_driver *cpufreq_driver;
103 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
104 static DEFINE_RWLOCK(cpufreq_driver_lock);
105 DEFINE_MUTEX(cpufreq_governor_lock);
106
107 /* Flag to suspend/resume CPUFreq governors */
108 static bool cpufreq_suspended;
109
110 static inline bool has_target(void)
111 {
112         return cpufreq_driver->target_index || cpufreq_driver->target;
113 }
114
115 /* internal prototypes */
116 static int __cpufreq_governor(struct cpufreq_policy *policy,
117                 unsigned int event);
118 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
119 static void handle_update(struct work_struct *work);
120
121 /**
122  * Two notifier lists: the "policy" list is involved in the
123  * validation process for a new CPU frequency policy; the
124  * "transition" list for kernel code that needs to handle
125  * changes to devices when the CPU clock speed changes.
126  * Each list is protected by its own notifier head's internal locking.
127  */
128 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
129 static struct srcu_notifier_head cpufreq_transition_notifier_list;
130
131 static bool init_cpufreq_transition_notifier_list_called;
132 static int __init init_cpufreq_transition_notifier_list(void)
133 {
134         srcu_init_notifier_head(&cpufreq_transition_notifier_list);
135         init_cpufreq_transition_notifier_list_called = true;
136         return 0;
137 }
138 pure_initcall(init_cpufreq_transition_notifier_list);
139
140 static int off __read_mostly;
141 static int cpufreq_disabled(void)
142 {
143         return off;
144 }
145 void disable_cpufreq(void)
146 {
147         off = 1;
148 }
149 static DEFINE_MUTEX(cpufreq_governor_mutex);
150
151 bool have_governor_per_policy(void)
152 {
153         return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
154 }
155 EXPORT_SYMBOL_GPL(have_governor_per_policy);
156
157 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
158 {
159         if (have_governor_per_policy())
160                 return &policy->kobj;
161         else
162                 return cpufreq_global_kobject;
163 }
164 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
165
166 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
167 {
168         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
169
170         return policy && !policy_is_inactive(policy) ?
171                 policy->freq_table : NULL;
172 }
173 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
174
175 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
176 {
177         u64 idle_time;
178         u64 cur_wall_time;
179         u64 busy_time;
180
181         cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
182
183         busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
184         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
185         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
186         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
187         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
188         busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
189
190         idle_time = cur_wall_time - busy_time;
191         if (wall)
192                 *wall = cputime_to_usecs(cur_wall_time);
193
194         return cputime_to_usecs(idle_time);
195 }
196
197 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
198 {
199         u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
200
201         if (idle_time == -1ULL)
202                 return get_cpu_idle_time_jiffy(cpu, wall);
203         else if (!io_busy)
204                 idle_time += get_cpu_iowait_time_us(cpu, wall);
205
206         return idle_time;
207 }
208 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
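
/*
 * Illustrative usage (a sketch, in the style of the sampling governors):
 * both returned times are in microseconds, and 'io_busy' selects whether
 * iowait time is treated as busy (1) or idle (0) time:
 *
 *	u64 wall, idle;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 */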
209
210 /*
211  * This is a generic cpufreq init() routine which can be used by cpufreq
212  * drivers of SMP systems. It will do the following:
213  * - validate & show the frequency table passed in
214  * - set the policy's transition latency
215  * - fill policy->cpus with all possible CPUs
216  */
217 int cpufreq_generic_init(struct cpufreq_policy *policy,
218                 struct cpufreq_frequency_table *table,
219                 unsigned int transition_latency)
220 {
221         int ret;
222
223         ret = cpufreq_table_validate_and_show(policy, table);
224         if (ret) {
225                 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
226                 return ret;
227         }
228
229         policy->cpuinfo.transition_latency = transition_latency;
230
231         /*
232          * The driver only supports the SMP configuration where all processors
233          * share the clock and voltage.
234          */
235         cpumask_setall(policy->cpus);
236
237         return 0;
238 }
239 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
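
/*
 * Illustrative driver usage (a sketch; "foo_freq_table" and the 1 ms
 * transition latency are made-up values):
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table, 1000000);
 *	}
 */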
240
241 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
242 {
243         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
244
245         return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
246 }
247 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
248
249 unsigned int cpufreq_generic_get(unsigned int cpu)
250 {
251         struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
252
253         if (!policy || IS_ERR(policy->clk)) {
254                 pr_err("%s: No %s associated to cpu: %d\n",
255                        __func__, policy ? "clk" : "policy", cpu);
256                 return 0;
257         }
258
259         return clk_get_rate(policy->clk) / 1000;
260 }
261 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
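
/*
 * Illustrative wiring (a sketch; "foo_driver" is made up): a driver that
 * sets policy->clk from its ->init() callback can use this helper
 * directly as its ->get() callback:
 *
 *	static struct cpufreq_driver foo_driver = {
 *		...
 *		.get	= cpufreq_generic_get,
 *	};
 */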
262
263 /**
264  * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
265  *
266  * @cpu: cpu to find policy for.
267  *
268  * This returns the policy for 'cpu', or NULL if it doesn't exist.
269  * It also increments the kobject reference count to mark the policy busy,
270  * so a corresponding call to cpufreq_cpu_put() is required to decrement it
271  * again. If that call isn't made, the policy will never be freed, as
272  * freeing depends on the kobj count dropping to zero.
273  *
274  * Return: A valid policy on success, otherwise NULL on failure.
275  */
276 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
277 {
278         struct cpufreq_policy *policy = NULL;
279         unsigned long flags;
280
281         if (WARN_ON(cpu >= nr_cpu_ids))
282                 return NULL;
283
284         /* get the cpufreq driver */
285         read_lock_irqsave(&cpufreq_driver_lock, flags);
286
287         if (cpufreq_driver) {
288                 /* get the CPU */
289                 policy = cpufreq_cpu_get_raw(cpu);
290                 if (policy)
291                         kobject_get(&policy->kobj);
292         }
293
294         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
295
296         return policy;
297 }
298 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
299
300 /**
301  * cpufreq_cpu_put: Decrements the usage count of a policy
302  *
303  * @policy: policy earlier returned by cpufreq_cpu_get().
304  *
305  * This decrements the kobject reference count incremented earlier by calling
306  * cpufreq_cpu_get().
307  */
308 void cpufreq_cpu_put(struct cpufreq_policy *policy)
309 {
310         kobject_put(&policy->kobj);
311 }
312 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
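
/*
 * Typical get/put pattern (a sketch): every successful cpufreq_cpu_get()
 * must be balanced by a cpufreq_cpu_put(), as e.g. cpufreq_quick_get_max()
 * later in this file demonstrates:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		... use policy ...
 *		cpufreq_cpu_put(policy);
 *	}
 */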
313
314 /*********************************************************************
315  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
316  *********************************************************************/
317
318 /**
319  * adjust_jiffies - adjust the system "loops_per_jiffy"
320  *
321  * This function alters the system "loops_per_jiffy" for the clock
322  * speed change. Note that loops_per_jiffy cannot be updated on SMP
323  * systems as each CPU might be scaled differently. So, use the arch
324  * per-CPU loops_per_jiffy value wherever possible.
325  */
326 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
327 {
328 #ifndef CONFIG_SMP
329         static unsigned long l_p_j_ref;
330         static unsigned int l_p_j_ref_freq;
331
332         if (ci->flags & CPUFREQ_CONST_LOOPS)
333                 return;
334
335         if (!l_p_j_ref_freq) {
336                 l_p_j_ref = loops_per_jiffy;
337                 l_p_j_ref_freq = ci->old;
338                 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
339                          l_p_j_ref, l_p_j_ref_freq);
340         }
341         if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
342                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
343                                                                 ci->new);
344                 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
345                          loops_per_jiffy, ci->new);
346         }
347 #endif
348 }
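
/*
 * Worked example (illustrative numbers): with a reference loops_per_jiffy
 * of 4,000,000 saved at 1,000,000 kHz, a POSTCHANGE to 500,000 kHz
 * rescales loops_per_jiffy to 4,000,000 * 500,000 / 1,000,000 = 2,000,000.
 */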
349
350 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
351                 struct cpufreq_freqs *freqs, unsigned int state)
352 {
353         BUG_ON(irqs_disabled());
354
355         if (cpufreq_disabled())
356                 return;
357
358         freqs->flags = cpufreq_driver->flags;
359         pr_debug("notification %u of frequency transition to %u kHz\n",
360                  state, freqs->new);
361
362         switch (state) {
363
364         case CPUFREQ_PRECHANGE:
365                 /* detect if the driver reported a value as "old frequency"
366                  * which is not equal to what the cpufreq core thinks is
367                  * "old frequency".
368                  */
369                 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
370                         if ((policy) && (policy->cpu == freqs->cpu) &&
371                             (policy->cur) && (policy->cur != freqs->old)) {
372                                 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
373                                          freqs->old, policy->cur);
374                                 freqs->old = policy->cur;
375                         }
376                 }
377                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
378                                 CPUFREQ_PRECHANGE, freqs);
379                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
380                 break;
381
382         case CPUFREQ_POSTCHANGE:
383                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
384                 pr_debug("FREQ: %lu - CPU: %lu\n",
385                          (unsigned long)freqs->new, (unsigned long)freqs->cpu);
386                 trace_cpu_frequency(freqs->new, freqs->cpu);
387                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
388                                 CPUFREQ_POSTCHANGE, freqs);
389                 if (likely(policy) && likely(policy->cpu == freqs->cpu))
390                         policy->cur = freqs->new;
391                 break;
392         }
393 }
394
395 /**
396  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
397  * on frequency transition.
398  *
399  * This function calls the transition notifiers and the "adjust_jiffies"
400  * function. It is called twice on all CPU frequency changes that have
401  * external effects.
402  */
403 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
404                 struct cpufreq_freqs *freqs, unsigned int state)
405 {
406         for_each_cpu(freqs->cpu, policy->cpus)
407                 __cpufreq_notify_transition(policy, freqs, state);
408 }
409
410 /* Do POSTCHANGE notifications even when the transition may have failed */
411 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
412                 struct cpufreq_freqs *freqs, int transition_failed)
413 {
414         cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
415         if (!transition_failed)
416                 return;
417
418         swap(freqs->old, freqs->new);
419         cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
420         cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
421 }
422
423 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
424                 struct cpufreq_freqs *freqs)
425 {
426
427         /*
428          * Catch double invocations of _begin() which lead to self-deadlock.
429          * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
430          * doesn't invoke _begin() on their behalf, and hence the chances of
431          * double invocations are very low. Moreover, there are scenarios
432          * where these checks can emit false-positive warnings in these
433          * drivers; so we avoid that by skipping them altogether.
434          */
435         WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
436                                 && current == policy->transition_task);
437
438 wait:
439         wait_event(policy->transition_wait, !policy->transition_ongoing);
440
441         spin_lock(&policy->transition_lock);
442
443         if (unlikely(policy->transition_ongoing)) {
444                 spin_unlock(&policy->transition_lock);
445                 goto wait;
446         }
447
448         policy->transition_ongoing = true;
449         policy->transition_task = current;
450
451         spin_unlock(&policy->transition_lock);
452
453         cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
454 }
455 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
456
457 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
458                 struct cpufreq_freqs *freqs, int transition_failed)
459 {
460         if (unlikely(WARN_ON(!policy->transition_ongoing)))
461                 return;
462
463         cpufreq_notify_post_transition(policy, freqs, transition_failed);
464
465         policy->transition_ongoing = false;
466         policy->transition_task = NULL;
467
468         wake_up(&policy->transition_wait);
469 }
470 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
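
/*
 * Illustrative driver-side sequence (a sketch; change_hardware_frequency()
 * is a made-up helper, and the cpufreq core issues these calls itself on
 * behalf of drivers that implement ->target_index()):
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = change_hardware_frequency(target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 */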
471
472
473 /*********************************************************************
474  *                          SYSFS INTERFACE                          *
475  *********************************************************************/
476 static ssize_t show_boost(struct kobject *kobj,
477                                  struct attribute *attr, char *buf)
478 {
479         return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
480 }
481
482 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
483                                   const char *buf, size_t count)
484 {
485         int ret, enable;
486
487         ret = sscanf(buf, "%d", &enable);
488         if (ret != 1 || enable < 0 || enable > 1)
489                 return -EINVAL;
490
491         if (cpufreq_boost_trigger_state(enable)) {
492                 pr_err("%s: Cannot %s BOOST!\n",
493                        __func__, enable ? "enable" : "disable");
494                 return -EINVAL;
495         }
496
497         pr_debug("%s: cpufreq BOOST %s\n",
498                  __func__, enable ? "enabled" : "disabled");
499
500         return count;
501 }
502 define_one_global_rw(boost);
503
504 static struct cpufreq_governor *find_governor(const char *str_governor)
505 {
506         struct cpufreq_governor *t;
507
508         for_each_governor(t)
509                 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
510                         return t;
511
512         return NULL;
513 }
514
515 /**
516  * cpufreq_parse_governor - parse a governor string
517  */
518 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
519                                 struct cpufreq_governor **governor)
520 {
521         int err = -EINVAL;
522
523         if (cpufreq_driver->setpolicy) {
524                 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
525                         *policy = CPUFREQ_POLICY_PERFORMANCE;
526                         err = 0;
527                 } else if (!strncasecmp(str_governor, "powersave",
528                                                 CPUFREQ_NAME_LEN)) {
529                         *policy = CPUFREQ_POLICY_POWERSAVE;
530                         err = 0;
531                 }
532         } else {
533                 struct cpufreq_governor *t;
534
535                 mutex_lock(&cpufreq_governor_mutex);
536
537                 t = find_governor(str_governor);
538
539                 if (t == NULL) {
540                         int ret;
541
542                         mutex_unlock(&cpufreq_governor_mutex);
543                         ret = request_module("cpufreq_%s", str_governor);
544                         mutex_lock(&cpufreq_governor_mutex);
545
546                         if (ret == 0)
547                                 t = find_governor(str_governor);
548                 }
549
550                 if (t != NULL) {
551                         *governor = t;
552                         err = 0;
553                 }
554
555                 mutex_unlock(&cpufreq_governor_mutex);
556         }
557         return err;
558 }
559
560 /**
561  * cpufreq_per_cpu_attr_read() / show_##file_name() -
562  * print out cpufreq information
563  *
564  * Write out information from the CPU's cpufreq policy; the object must be
565  * an "unsigned int".
566  */
567
568 #define show_one(file_name, object)                     \
569 static ssize_t show_##file_name                         \
570 (struct cpufreq_policy *policy, char *buf)              \
571 {                                                       \
572         return sprintf(buf, "%u\n", policy->object);    \
573 }
574
575 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
576 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
577 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
578 show_one(scaling_min_freq, min);
579 show_one(scaling_max_freq, max);
580
581 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
582 {
583         ssize_t ret;
584
585         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
586                 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
587         else
588                 ret = sprintf(buf, "%u\n", policy->cur);
589         return ret;
590 }
591
592 static int cpufreq_set_policy(struct cpufreq_policy *policy,
593                                 struct cpufreq_policy *new_policy);
594
595 /**
596  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
597  */
598 #define store_one(file_name, object)                    \
599 static ssize_t store_##file_name                                        \
600 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
601 {                                                                       \
602         int ret, temp;                                                  \
603         struct cpufreq_policy new_policy;                               \
604                                                                         \
605         memcpy(&new_policy, policy, sizeof(*policy));                   \
606                                                                         \
607         ret = sscanf(buf, "%u", &new_policy.object);                    \
608         if (ret != 1)                                                   \
609                 return -EINVAL;                                         \
610                                                                         \
611         temp = new_policy.object;                                       \
612         ret = cpufreq_set_policy(policy, &new_policy);          \
613         if (!ret)                                                       \
614                 policy->user_policy.object = temp;                      \
615                                                                         \
616         return ret ? ret : count;                                       \
617 }
618
619 store_one(scaling_min_freq, min);
620 store_one(scaling_max_freq, max);
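
/*
 * For reference, store_one(scaling_max_freq, max) above expands to a
 * store_scaling_max_freq() that parses an "unsigned int" from the sysfs
 * write and applies it via cpufreq_set_policy(). From userspace this is
 * reached with, e.g. (illustrative):
 *
 *	# echo 1200000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
 */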
621
622 /**
623  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
624  */
625 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
626                                         char *buf)
627 {
628         unsigned int cur_freq = __cpufreq_get(policy);
629         if (!cur_freq)
630                 return sprintf(buf, "<unknown>");
631         return sprintf(buf, "%u\n", cur_freq);
632 }
633
634 /**
635  * show_scaling_governor - show the current policy for the specified CPU
636  */
637 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
638 {
639         if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
640                 return sprintf(buf, "powersave\n");
641         else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
642                 return sprintf(buf, "performance\n");
643         else if (policy->governor)
644                 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
645                                 policy->governor->name);
646         return -EINVAL;
647 }
648
649 /**
650  * store_scaling_governor - store policy for the specified CPU
651  */
652 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
653                                         const char *buf, size_t count)
654 {
655         int ret;
656         char    str_governor[16];
657         struct cpufreq_policy new_policy;
658
659         memcpy(&new_policy, policy, sizeof(*policy));
660
661         ret = sscanf(buf, "%15s", str_governor);
662         if (ret != 1)
663                 return -EINVAL;
664
665         if (cpufreq_parse_governor(str_governor, &new_policy.policy,
666                                                 &new_policy.governor))
667                 return -EINVAL;
668
669         ret = cpufreq_set_policy(policy, &new_policy);
670         return ret ? ret : count;
671 }
672
673 /**
674  * show_scaling_driver - show the cpufreq driver currently loaded
675  */
676 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
677 {
678         return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
679 }
680
681 /**
682  * show_scaling_available_governors - show the available CPUfreq governors
683  */
684 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
685                                                 char *buf)
686 {
687         ssize_t i = 0;
688         struct cpufreq_governor *t;
689
690         if (!has_target()) {
691                 i += sprintf(buf, "performance powersave");
692                 goto out;
693         }
694
695         for_each_governor(t) {
696                 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
697                     - (CPUFREQ_NAME_LEN + 2)))
698                         goto out;
699                 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
700         }
701 out:
702         i += sprintf(&buf[i], "\n");
703         return i;
704 }
705
706 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
707 {
708         ssize_t i = 0;
709         unsigned int cpu;
710
711         for_each_cpu(cpu, mask) {
712                 if (i)
713                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
714                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
715                 if (i >= (PAGE_SIZE - 5))
716                         break;
717         }
718         i += sprintf(&buf[i], "\n");
719         return i;
720 }
721 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
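
/*
 * Example output (illustrative): for a mask containing CPUs 0-3 this
 * yields the string "0 1 2 3\n", which is what the related_cpus and
 * affected_cpus attributes below expose.
 */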
722
723 /**
724  * show_related_cpus - show the CPUs affected by each transition even if
725  * hw coordination is in use
726  */
727 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
728 {
729         return cpufreq_show_cpus(policy->related_cpus, buf);
730 }
731
732 /**
733  * show_affected_cpus - show the CPUs affected by each transition
734  */
735 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
736 {
737         return cpufreq_show_cpus(policy->cpus, buf);
738 }
739
740 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
741                                         const char *buf, size_t count)
742 {
743         unsigned int freq = 0;
744         unsigned int ret;
745
746         if (!policy->governor || !policy->governor->store_setspeed)
747                 return -EINVAL;
748
749         ret = sscanf(buf, "%u", &freq);
750         if (ret != 1)
751                 return -EINVAL;
752
753         policy->governor->store_setspeed(policy, freq);
754
755         return count;
756 }
757
758 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
759 {
760         if (!policy->governor || !policy->governor->show_setspeed)
761                 return sprintf(buf, "<unsupported>\n");
762
763         return policy->governor->show_setspeed(policy, buf);
764 }
765
766 /**
767  * show_bios_limit - show the current cpufreq HW/BIOS limitation
768  */
769 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
770 {
771         unsigned int limit;
772         int ret;
773         if (cpufreq_driver->bios_limit) {
774                 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
775                 if (!ret)
776                         return sprintf(buf, "%u\n", limit);
777         }
778         return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
779 }
780
781 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
782 cpufreq_freq_attr_ro(cpuinfo_min_freq);
783 cpufreq_freq_attr_ro(cpuinfo_max_freq);
784 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
785 cpufreq_freq_attr_ro(scaling_available_governors);
786 cpufreq_freq_attr_ro(scaling_driver);
787 cpufreq_freq_attr_ro(scaling_cur_freq);
788 cpufreq_freq_attr_ro(bios_limit);
789 cpufreq_freq_attr_ro(related_cpus);
790 cpufreq_freq_attr_ro(affected_cpus);
791 cpufreq_freq_attr_rw(scaling_min_freq);
792 cpufreq_freq_attr_rw(scaling_max_freq);
793 cpufreq_freq_attr_rw(scaling_governor);
794 cpufreq_freq_attr_rw(scaling_setspeed);
795
796 static struct attribute *default_attrs[] = {
797         &cpuinfo_min_freq.attr,
798         &cpuinfo_max_freq.attr,
799         &cpuinfo_transition_latency.attr,
800         &scaling_min_freq.attr,
801         &scaling_max_freq.attr,
802         &affected_cpus.attr,
803         &related_cpus.attr,
804         &scaling_governor.attr,
805         &scaling_driver.attr,
806         &scaling_available_governors.attr,
807         &scaling_setspeed.attr,
808         NULL
809 };
810
811 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
812 #define to_attr(a) container_of(a, struct freq_attr, attr)
813
814 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
815 {
816         struct cpufreq_policy *policy = to_policy(kobj);
817         struct freq_attr *fattr = to_attr(attr);
818         ssize_t ret;
819
820         down_read(&policy->rwsem);
821
822         if (fattr->show)
823                 ret = fattr->show(policy, buf);
824         else
825                 ret = -EIO;
826
827         up_read(&policy->rwsem);
828
829         return ret;
830 }
831
832 static ssize_t store(struct kobject *kobj, struct attribute *attr,
833                      const char *buf, size_t count)
834 {
835         struct cpufreq_policy *policy = to_policy(kobj);
836         struct freq_attr *fattr = to_attr(attr);
837         ssize_t ret = -EINVAL;
838
839         get_online_cpus();
840
841         if (!cpu_online(policy->cpu))
842                 goto unlock;
843
844         down_write(&policy->rwsem);
845
846         if (fattr->store)
847                 ret = fattr->store(policy, buf, count);
848         else
849                 ret = -EIO;
850
851         up_write(&policy->rwsem);
852 unlock:
853         put_online_cpus();
854
855         return ret;
856 }
857
858 static void cpufreq_sysfs_release(struct kobject *kobj)
859 {
860         struct cpufreq_policy *policy = to_policy(kobj);
861         pr_debug("last reference is dropped\n");
862         complete(&policy->kobj_unregister);
863 }
864
865 static const struct sysfs_ops sysfs_ops = {
866         .show   = show,
867         .store  = store,
868 };
869
870 static struct kobj_type ktype_cpufreq = {
871         .sysfs_ops      = &sysfs_ops,
872         .default_attrs  = default_attrs,
873         .release        = cpufreq_sysfs_release,
874 };
875
876 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
877 {
878         struct device *cpu_dev;
879
880         pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
881
882         if (!policy)
883                 return 0;
884
885         cpu_dev = get_cpu_device(cpu);
886         if (WARN_ON(!cpu_dev))
887                 return 0;
888
889         return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
890 }
891
892 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
893 {
894         struct device *cpu_dev;
895
896         pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
897
898         cpu_dev = get_cpu_device(cpu);
899         if (WARN_ON(!cpu_dev))
900                 return;
901
902         sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
903 }
904
905 /* Add/remove symlinks for all related CPUs */
906 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
907 {
908         unsigned int j;
909         int ret = 0;
910
911         /* Some related CPUs might not be present (physically hotplugged) */
912         for_each_cpu(j, policy->real_cpus) {
913                 ret = add_cpu_dev_symlink(policy, j);
914                 if (ret)
915                         break;
916         }
917
918         return ret;
919 }
920
921 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
922 {
923         unsigned int j;
924
925         /* Some related CPUs might not be present (physically hotplugged) */
926         for_each_cpu(j, policy->real_cpus)
927                 remove_cpu_dev_symlink(policy, j);
928 }
929
930 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
931 {
932         struct freq_attr **drv_attr;
933         int ret = 0;
934
935         /* set up files for this cpu device */
936         drv_attr = cpufreq_driver->attr;
937         while (drv_attr && *drv_attr) {
938                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
939                 if (ret)
940                         return ret;
941                 drv_attr++;
942         }
943         if (cpufreq_driver->get) {
944                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
945                 if (ret)
946                         return ret;
947         }
948
949         ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
950         if (ret)
951                 return ret;
952
953         if (cpufreq_driver->bios_limit) {
954                 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
955                 if (ret)
956                         return ret;
957         }
958
959         return cpufreq_add_dev_symlink(policy);
960 }
961
962 static int cpufreq_init_policy(struct cpufreq_policy *policy)
963 {
964         struct cpufreq_governor *gov = NULL;
965         struct cpufreq_policy new_policy;
966
967         memcpy(&new_policy, policy, sizeof(*policy));
968
969         /* Update governor of new_policy to the governor used before hotplug */
970         gov = find_governor(policy->last_governor);
971         if (gov)
972                 pr_debug("Restoring governor %s for cpu %d\n",
973                                 gov->name, policy->cpu);
974         else
975                 gov = CPUFREQ_DEFAULT_GOVERNOR;
976
977         new_policy.governor = gov;
978
979         /* Use the default policy if it's valid. */
980         if (cpufreq_driver->setpolicy)
981                 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
982
983         /* set default policy */
984         return cpufreq_set_policy(policy, &new_policy);
985 }
986
987 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
988 {
989         int ret = 0;
990
991         /* Has this CPU been taken care of already? */
992         if (cpumask_test_cpu(cpu, policy->cpus))
993                 return 0;
994
995         if (has_target()) {
996                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
997                 if (ret) {
998                         pr_err("%s: Failed to stop governor\n", __func__);
999                         return ret;
1000                 }
1001         }
1002
1003         down_write(&policy->rwsem);
1004         cpumask_set_cpu(cpu, policy->cpus);
1005         up_write(&policy->rwsem);
1006
1007         if (has_target()) {
1008                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1009                 if (!ret)
1010                         ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1011
1012                 if (ret) {
1013                         pr_err("%s: Failed to start governor\n", __func__);
1014                         return ret;
1015                 }
1016         }
1017
1018         return 0;
1019 }
1020
1021 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1022 {
1023         struct device *dev = get_cpu_device(cpu);
1024         struct cpufreq_policy *policy;
1025
1026         if (WARN_ON(!dev))
1027                 return NULL;
1028
1029         policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1030         if (!policy)
1031                 return NULL;
1032
1033         if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1034                 goto err_free_policy;
1035
1036         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1037                 goto err_free_cpumask;
1038
1039         if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1040                 goto err_free_rcpumask;
1041
1042         kobject_init(&policy->kobj, &ktype_cpufreq);
1043         INIT_LIST_HEAD(&policy->policy_list);
1044         init_rwsem(&policy->rwsem);
1045         spin_lock_init(&policy->transition_lock);
1046         init_waitqueue_head(&policy->transition_wait);
1047         init_completion(&policy->kobj_unregister);
1048         INIT_WORK(&policy->update, handle_update);
1049
1050         policy->cpu = cpu;
1051         return policy;
1052
1053 err_free_rcpumask:
1054         free_cpumask_var(policy->related_cpus);
1055 err_free_cpumask:
1056         free_cpumask_var(policy->cpus);
1057 err_free_policy:
1058         kfree(policy);
1059
1060         return NULL;
1061 }
1062
1063 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1064 {
1065         struct kobject *kobj;
1066         struct completion *cmp;
1067
1068         if (notify)
1069                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1070                                              CPUFREQ_REMOVE_POLICY, policy);
1071
1072         down_write(&policy->rwsem);
1073         cpufreq_remove_dev_symlink(policy);
1074         kobj = &policy->kobj;
1075         cmp = &policy->kobj_unregister;
1076         up_write(&policy->rwsem);
1077         kobject_put(kobj);
1078
1079         /*
1080          * We need to make sure that the underlying kobj is
1081          * actually not referenced anymore by anybody before we
1082          * proceed with unloading.
1083          */
1084         pr_debug("waiting for dropping of refcount\n");
1085         wait_for_completion(cmp);
1086         pr_debug("wait complete\n");
1087 }
1088
1089 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1090 {
1091         unsigned long flags;
1092         int cpu;
1093
1094         /* Remove policy from list */
1095         write_lock_irqsave(&cpufreq_driver_lock, flags);
1096         list_del(&policy->policy_list);
1097
1098         for_each_cpu(cpu, policy->related_cpus)
1099                 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1100         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1101
1102         cpufreq_policy_put_kobj(policy, notify);
1103         free_cpumask_var(policy->real_cpus);
1104         free_cpumask_var(policy->related_cpus);
1105         free_cpumask_var(policy->cpus);
1106         kfree(policy);
1107 }
1108
1109 static int cpufreq_online(unsigned int cpu)
1110 {
1111         struct cpufreq_policy *policy;
1112         bool new_policy;
1113         unsigned long flags;
1114         unsigned int j;
1115         int ret;
1116
1117         pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1118
1119         /* Check if this CPU already has a policy to manage it */
1120         policy = per_cpu(cpufreq_cpu_data, cpu);
1121         if (policy) {
1122                 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1123                 if (!policy_is_inactive(policy))
1124                         return cpufreq_add_policy_cpu(policy, cpu);
1125
1126                 /* This is the only online CPU for the policy.  Start over. */
1127                 new_policy = false;
1128                 down_write(&policy->rwsem);
1129                 policy->cpu = cpu;
1130                 policy->governor = NULL;
1131                 up_write(&policy->rwsem);
1132         } else {
1133                 new_policy = true;
1134                 policy = cpufreq_policy_alloc(cpu);
1135                 if (!policy)
1136                         return -ENOMEM;
1137         }
1138
1139         cpumask_copy(policy->cpus, cpumask_of(cpu));
1140
1141         /* Call the driver. From then on, it must be able to accept
1142          * all calls to ->verify and ->setpolicy for this CPU.
1143          */
1144         ret = cpufreq_driver->init(policy);
1145         if (ret) {
1146                 pr_debug("initialization failed\n");
1147                 goto out_free_policy;
1148         }
1149
1150         down_write(&policy->rwsem);
1151
1152         if (new_policy) {
1153                 /* related_cpus should at least include policy->cpus. */
1154                 cpumask_copy(policy->related_cpus, policy->cpus);
1155                 /* Remember CPUs present at the policy creation time. */
1156                 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1157
1158                 /* Name and add the kobject */
1159                 ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
1160                                   "policy%u",
1161                                   cpumask_first(policy->related_cpus));
1162                 if (ret) {
1163                         pr_err("%s: failed to add policy->kobj: %d\n", __func__,
1164                                ret);
1165                         goto out_exit_policy;
1166                 }
1167         }
1168
1169         /*
1170          * The affected CPUs must always be the ones that are online; we
1171          * aren't managing offline CPUs here.
1172          */
1173         cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1174
1175         if (new_policy) {
1176                 policy->user_policy.min = policy->min;
1177                 policy->user_policy.max = policy->max;
1178
1179                 write_lock_irqsave(&cpufreq_driver_lock, flags);
1180                 for_each_cpu(j, policy->related_cpus)
1181                         per_cpu(cpufreq_cpu_data, j) = policy;
1182                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1183         }
1184
1185         if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1186                 policy->cur = cpufreq_driver->get(policy->cpu);
1187                 if (!policy->cur) {
1188                         pr_err("%s: ->get() failed\n", __func__);
1189                         goto out_exit_policy;
1190                 }
1191         }
1192
1193         /*
1194          * Sometimes boot loaders set the CPU frequency to a value outside of
1195          * the frequency table known to the cpufreq core. In such cases the CPU
1196          * might be unstable if it has to run at that frequency for a long
1197          * duration, so it's better to set it to a frequency that is listed in
1198          * the table. An out-of-table frequency also makes cpufreq stats
1199          * inconsistent, as cpufreq-stats would fail to register because the
1200          * current frequency of the CPU isn't found in the table.
1201          *
1202          * Because we don't want this change to affect the boot process badly,
1203          * we go for the next frequency which is >= policy->cur ('cur' must be
1204          * set by now, otherwise we will end up setting the frequency to the
1205          * lowest entry of the table, as 'cur' is initialized to zero).
1206          *
1207          * We pass the target frequency as "policy->cur - 1"; otherwise
1208          * __cpufreq_driver_target() would simply fail, as policy->cur would be
1209          * equal to the target frequency.
1210          */
1211         if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1212             && has_target()) {
1213                 /* Are we running at an unknown frequency? */
1214                 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1215                 if (ret == -EINVAL) {
1216                         /* Warn user and fix it */
1217                         pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1218                                 __func__, policy->cpu, policy->cur);
1219                         ret = __cpufreq_driver_target(policy, policy->cur - 1,
1220                                 CPUFREQ_RELATION_L);
1221
1222                         /*
1223                          * Reaching here a few seconds after boot does not mean
1224                          * the system will remain stable at the "unknown"
1225                          * frequency for a longer duration. Hence, a BUG_ON().
1226                          */
1227                         BUG_ON(ret);
1228                         pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1229                                 __func__, policy->cpu, policy->cur);
1230                 }
1231         }
1232
1233         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1234                                      CPUFREQ_START, policy);
1235
1236         if (new_policy) {
1237                 ret = cpufreq_add_dev_interface(policy);
1238                 if (ret)
1239                         goto out_exit_policy;
1240                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1241                                 CPUFREQ_CREATE_POLICY, policy);
1242
1243                 write_lock_irqsave(&cpufreq_driver_lock, flags);
1244                 list_add(&policy->policy_list, &cpufreq_policy_list);
1245                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1246         }
1247
1248         ret = cpufreq_init_policy(policy);
1249         if (ret) {
1250                 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1251                        __func__, cpu, ret);
1252                 /* cpufreq_policy_free() will notify based on this */
1253                 new_policy = false;
1254                 goto out_exit_policy;
1255         }
1256
1257         up_write(&policy->rwsem);
1258
1259         kobject_uevent(&policy->kobj, KOBJ_ADD);
1260
1261         /* Callback for handling stuff after policy is ready */
1262         if (cpufreq_driver->ready)
1263                 cpufreq_driver->ready(policy);
1264
1265         pr_debug("initialization complete\n");
1266
1267         return 0;
1268
1269 out_exit_policy:
1270         up_write(&policy->rwsem);
1271
1272         if (cpufreq_driver->exit)
1273                 cpufreq_driver->exit(policy);
1274 out_free_policy:
1275         cpufreq_policy_free(policy, !new_policy);
1276         return ret;
1277 }
1278
1279 /**
1280  * cpufreq_add_dev - the cpufreq interface for a CPU device.
1281  * @dev: CPU device.
1282  * @sif: Subsystem interface structure pointer (not used)
1283  */
1284 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1285 {
1286         unsigned cpu = dev->id;
1287         int ret;
1288
1289         dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1290
1291         if (cpu_online(cpu)) {
1292                 ret = cpufreq_online(cpu);
1293         } else {
1294                 /*
1295                  * A hotplug notifier will follow and we will handle it as CPU
1296                  * online then.  For now, just create the sysfs link, unless
1297                  * there is no policy or the link is already present.
1298                  */
1299                 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1300
1301                 ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1302                         ? add_cpu_dev_symlink(policy, cpu) : 0;
1303         }
1304
1305         return ret;
1306 }
1307
1308 static void cpufreq_offline_prepare(unsigned int cpu)
1309 {
1310         struct cpufreq_policy *policy;
1311
1312         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1313
1314         policy = cpufreq_cpu_get_raw(cpu);
1315         if (!policy) {
1316                 pr_debug("%s: No cpu_data found\n", __func__);
1317                 return;
1318         }
1319
1320         if (has_target()) {
1321                 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1322                 if (ret)
1323                         pr_err("%s: Failed to stop governor\n", __func__);
1324         }
1325
1326         down_write(&policy->rwsem);
1327         cpumask_clear_cpu(cpu, policy->cpus);
1328
1329         if (policy_is_inactive(policy)) {
1330                 if (has_target())
1331                         strncpy(policy->last_governor, policy->governor->name,
1332                                 CPUFREQ_NAME_LEN);
1333         } else if (cpu == policy->cpu) {
1334                 /* Nominate new CPU */
1335                 policy->cpu = cpumask_any(policy->cpus);
1336         }
1337         up_write(&policy->rwsem);
1338
1339         /* Start governor again for active policy */
1340         if (!policy_is_inactive(policy)) {
1341                 if (has_target()) {
1342                         int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1343                         if (!ret)
1344                                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1345
1346                         if (ret)
1347                                 pr_err("%s: Failed to start governor\n", __func__);
1348                 }
1349         } else if (cpufreq_driver->stop_cpu) {
1350                 cpufreq_driver->stop_cpu(policy);
1351         }
1352 }
1353
1354 static void cpufreq_offline_finish(unsigned int cpu)
1355 {
1356         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1357
1358         if (!policy) {
1359                 pr_debug("%s: No cpu_data found\n", __func__);
1360                 return;
1361         }
1362
1363         /* Only proceed for inactive policies */
1364         if (!policy_is_inactive(policy))
1365                 return;
1366
1367         /* If cpu is last user of policy, free policy */
1368         if (has_target()) {
1369                 int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1370                 if (ret)
1371                         pr_err("%s: Failed to exit governor\n", __func__);
1372         }
1373
1374         /*
1375          * Perform the ->exit() even during light-weight tear-down,
1376          * since this is a core component, and is essential for the
1377          * subsequent light-weight ->init() to succeed.
1378          */
1379         if (cpufreq_driver->exit) {
1380                 cpufreq_driver->exit(policy);
1381                 policy->freq_table = NULL;
1382         }
1383 }
1384
1385 /**
1386  * cpufreq_remove_dev - remove a CPU device
1387  *
1388  * Removes the cpufreq interface for a CPU device.
1389  */
1390 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1391 {
1392         unsigned int cpu = dev->id;
1393         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1394
1395         if (!policy)
1396                 return;
1397
1398         if (cpu_online(cpu)) {
1399                 cpufreq_offline_prepare(cpu);
1400                 cpufreq_offline_finish(cpu);
1401         }
1402
1403         cpumask_clear_cpu(cpu, policy->real_cpus);
1404         remove_cpu_dev_symlink(policy, cpu);
1405
1406         if (cpumask_empty(policy->real_cpus))
1407                 cpufreq_policy_free(policy, true);
1408 }
1409
1410 static void handle_update(struct work_struct *work)
1411 {
1412         struct cpufreq_policy *policy =
1413                 container_of(work, struct cpufreq_policy, update);
1414         unsigned int cpu = policy->cpu;
1415         pr_debug("handle_update for cpu %u called\n", cpu);
1416         cpufreq_update_policy(cpu);
1417 }
1418
1419 /**
1420  *      cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1421  *      in deep trouble.
1422  *      @policy: policy managing CPUs
1423  *      @new_freq: CPU frequency the CPU actually runs at
1424  *
1425  *      We adjust to the current frequency first, and need to clean up later,
1426  *      either by calling cpufreq_update_policy() or by scheduling handle_update().
1427  */
1428 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1429                                 unsigned int new_freq)
1430 {
1431         struct cpufreq_freqs freqs;
1432
1433         pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1434                  policy->cur, new_freq);
1435
1436         freqs.old = policy->cur;
1437         freqs.new = new_freq;
1438
1439         cpufreq_freq_transition_begin(policy, &freqs);
1440         cpufreq_freq_transition_end(policy, &freqs, 0);
1441 }
1442
1443 /**
1444  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1445  * @cpu: CPU number
1446  *
1447  * This is the last known frequency, without actually getting it from the driver.
1448  * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1449  */
1450 unsigned int cpufreq_quick_get(unsigned int cpu)
1451 {
1452         struct cpufreq_policy *policy;
1453         unsigned int ret_freq = 0;
1454
1455         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1456                 return cpufreq_driver->get(cpu);
1457
1458         policy = cpufreq_cpu_get(cpu);
1459         if (policy) {
1460                 ret_freq = policy->cur;
1461                 cpufreq_cpu_put(policy);
1462         }
1463
1464         return ret_freq;
1465 }
1466 EXPORT_SYMBOL(cpufreq_quick_get);
1467
1468 /**
1469  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1470  * @cpu: CPU number
1471  *
1472  * Just return the max possible frequency for a given CPU.
1473  */
1474 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1475 {
1476         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1477         unsigned int ret_freq = 0;
1478
1479         if (policy) {
1480                 ret_freq = policy->max;
1481                 cpufreq_cpu_put(policy);
1482         }
1483
1484         return ret_freq;
1485 }
1486 EXPORT_SYMBOL(cpufreq_quick_get_max);
1487
1488 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1489 {
1490         unsigned int ret_freq = 0;
1491
1492         if (!cpufreq_driver->get)
1493                 return ret_freq;
1494
1495         ret_freq = cpufreq_driver->get(policy->cpu);
1496
1497         /* Updating inactive policies is invalid, so avoid doing that. */
1498         if (unlikely(policy_is_inactive(policy)))
1499                 return ret_freq;
1500
1501         if (ret_freq && policy->cur &&
1502                 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1503                 /* verify no discrepancy between actual and
1504                  * saved value exists */
1505                 if (unlikely(ret_freq != policy->cur)) {
1506                         cpufreq_out_of_sync(policy, ret_freq);
1507                         schedule_work(&policy->update);
1508                 }
1509         }
1510
1511         return ret_freq;
1512 }
1513
1514 /**
1515  * cpufreq_get - get the current CPU frequency (in kHz)
1516  * @cpu: CPU number
1517  *
1518  * Get the current frequency of the CPU as reported by the driver.
1519  */
1520 unsigned int cpufreq_get(unsigned int cpu)
1521 {
1522         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1523         unsigned int ret_freq = 0;
1524
1525         if (policy) {
1526                 down_read(&policy->rwsem);
1527                 ret_freq = __cpufreq_get(policy);
1528                 up_read(&policy->rwsem);
1529
1530                 cpufreq_cpu_put(policy);
1531         }
1532
1533         return ret_freq;
1534 }
1535 EXPORT_SYMBOL(cpufreq_get);
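
/*
 * Illustrative sketch (not part of this file): a caller that can tolerate a
 * stale value uses cpufreq_quick_get(), which only reads policy->cur; one
 * that needs what the hardware actually reports pays for cpufreq_get(),
 * which may also resynchronize policy->cur. show_cpu0_freq is a
 * hypothetical name.
 */
#if 0
static void show_cpu0_freq(void)
{
        unsigned int cached = cpufreq_quick_get(0);     /* last known value */
        unsigned int actual = cpufreq_get(0);           /* queries the driver */

        pr_info("cpu0: cached %u kHz, actual %u kHz\n", cached, actual);
}
#endif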
1536
1537 static struct subsys_interface cpufreq_interface = {
1538         .name           = "cpufreq",
1539         .subsys         = &cpu_subsys,
1540         .add_dev        = cpufreq_add_dev,
1541         .remove_dev     = cpufreq_remove_dev,
1542 };
1543
1544 /*
1545  * Generic suspend handler for platforms that want a specific frequency
1546  * to be configured during suspend.
1547  */
1548 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1549 {
1550         int ret;
1551
1552         if (!policy->suspend_freq) {
1553                 pr_debug("%s: suspend_freq not defined\n", __func__);
1554                 return 0;
1555         }
1556
1557         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1558                         policy->suspend_freq);
1559
1560         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1561                         CPUFREQ_RELATION_H);
1562         if (ret)
1563                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1564                                 __func__, policy->suspend_freq, ret);
1565
1566         return ret;
1567 }
1568 EXPORT_SYMBOL(cpufreq_generic_suspend);
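
/*
 * Illustrative sketch (not part of this file): a driver that must run at a
 * fixed frequency across suspend sets policy->suspend_freq in its ->init()
 * callback and points ->suspend at cpufreq_generic_suspend(). The foo_*
 * names are hypothetical.
 */
#if 0
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        /* ... usual frequency-table setup ... */
        policy->suspend_freq = policy->max;     /* park at max across suspend */
        return 0;
}

static struct cpufreq_driver foo_driver = {
        .init           = foo_cpufreq_init,
        .suspend        = cpufreq_generic_suspend,
        /* ... */
};
#endif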
1569
1570 /**
1571  * cpufreq_suspend() - Suspend CPUFreq governors
1572  *
1573  * Called during system-wide suspend/hibernate cycles to suspend governors,
1574  * as some platforms can't change frequency after this point in the suspend
1575  * cycle: the devices they use for changing frequency (i2c, regulators, etc.)
1576  * are themselves suspended shortly afterwards.
1577  */
1578 void cpufreq_suspend(void)
1579 {
1580         struct cpufreq_policy *policy;
1581
1582         if (!cpufreq_driver)
1583                 return;
1584
1585         if (!has_target())
1586                 goto suspend;
1587
1588         pr_debug("%s: Suspending Governors\n", __func__);
1589
1590         for_each_active_policy(policy) {
1591                 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1592                         pr_err("%s: Failed to stop governor for policy: %p\n",
1593                                 __func__, policy);
1594                 else if (cpufreq_driver->suspend
1595                     && cpufreq_driver->suspend(policy))
1596                         pr_err("%s: Failed to suspend driver: %p\n", __func__,
1597                                 policy);
1598         }
1599
1600 suspend:
1601         cpufreq_suspended = true;
1602 }
1603
1604 /**
1605  * cpufreq_resume() - Resume CPUFreq governors
1606  *
1607  * Called during system-wide suspend/hibernate cycles to resume governors
1608  * that were suspended with cpufreq_suspend().
1609  */
1610 void cpufreq_resume(void)
1611 {
1612         struct cpufreq_policy *policy;
1613
1614         if (!cpufreq_driver)
1615                 return;
1616
1617         cpufreq_suspended = false;
1618
1619         if (!has_target())
1620                 return;
1621
1622         pr_debug("%s: Resuming Governors\n", __func__);
1623
1624         for_each_active_policy(policy) {
1625                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1626                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1627                                 policy);
1628                 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1629                     || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1630                         pr_err("%s: Failed to start governor for policy: %p\n",
1631                                 __func__, policy);
1632         }
1633
1634         /*
1635          * Schedule a call to cpufreq_update_policy() for the first online
1636          * CPU (it isn't hotplugged out on suspend); this verifies that the
1637          * current frequency is in sync with what we believe it to be.
1638          */
1639         policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1640         if (WARN_ON(!policy))
1641                 return;
1642
1643         schedule_work(&policy->update);
1644 }
1645
1646 /**
1647  *      cpufreq_get_current_driver - return current driver's name
1648  *
1649  *      Return the name string of the currently loaded cpufreq driver
1650  *      or NULL, if none.
1651  */
1652 const char *cpufreq_get_current_driver(void)
1653 {
1654         if (cpufreq_driver)
1655                 return cpufreq_driver->name;
1656
1657         return NULL;
1658 }
1659 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1660
1661 /**
1662  *      cpufreq_get_driver_data - return current driver data
1663  *
1664  *      Return the private data of the currently loaded cpufreq
1665  *      driver, or NULL if no cpufreq driver is loaded.
1666  */
1667 void *cpufreq_get_driver_data(void)
1668 {
1669         if (cpufreq_driver)
1670                 return cpufreq_driver->driver_data;
1671
1672         return NULL;
1673 }
1674 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1675
1676 /*********************************************************************
1677  *                     NOTIFIER LISTS INTERFACE                      *
1678  *********************************************************************/
1679
1680 /**
1681  *      cpufreq_register_notifier - register a driver with cpufreq
1682  *      @nb: notifier function to register
1683  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1684  *
1685  *      Add a driver to one of two lists: either a list of drivers that
1686  *      are notified about clock rate changes (once before and once after
1687  *      the transition), or a list of drivers that are notified about
1688  *      changes in cpufreq policy.
1689  *
1690  *      This function may sleep, and has the same return conditions as
1691  *      blocking_notifier_chain_register.
1692  */
1693 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1694 {
1695         int ret;
1696
1697         if (cpufreq_disabled())
1698                 return -EINVAL;
1699
1700         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1701
1702         switch (list) {
1703         case CPUFREQ_TRANSITION_NOTIFIER:
1704                 ret = srcu_notifier_chain_register(
1705                                 &cpufreq_transition_notifier_list, nb);
1706                 break;
1707         case CPUFREQ_POLICY_NOTIFIER:
1708                 ret = blocking_notifier_chain_register(
1709                                 &cpufreq_policy_notifier_list, nb);
1710                 break;
1711         default:
1712                 ret = -EINVAL;
1713         }
1714
1715         return ret;
1716 }
1717 EXPORT_SYMBOL(cpufreq_register_notifier);
1718
1719 /**
1720  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1721  *      @nb: notifier block to be unregistered
1722  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1723  *
1724  *      Remove a driver from the CPU frequency notifier list.
1725  *
1726  *      This function may sleep, and has the same return conditions as
1727  *      blocking_notifier_chain_unregister.
1728  */
1729 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1730 {
1731         int ret;
1732
1733         if (cpufreq_disabled())
1734                 return -EINVAL;
1735
1736         switch (list) {
1737         case CPUFREQ_TRANSITION_NOTIFIER:
1738                 ret = srcu_notifier_chain_unregister(
1739                                 &cpufreq_transition_notifier_list, nb);
1740                 break;
1741         case CPUFREQ_POLICY_NOTIFIER:
1742                 ret = blocking_notifier_chain_unregister(
1743                                 &cpufreq_policy_notifier_list, nb);
1744                 break;
1745         default:
1746                 ret = -EINVAL;
1747         }
1748
1749         return ret;
1750 }
1751 EXPORT_SYMBOL(cpufreq_unregister_notifier);
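
/*
 * Illustrative sketch (not part of this file): a consumer of transition
 * notifications registers on the SRCU transition list and receives one
 * CPUFREQ_PRECHANGE and one CPUFREQ_POSTCHANGE call per frequency change.
 * The foo_* names are hypothetical.
 */
#if 0
static int foo_transition_cb(struct notifier_block *nb, unsigned long val,
                             void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (val == CPUFREQ_POSTCHANGE)
                pr_info("cpu%u: %u -> %u kHz\n",
                        freqs->cpu, freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
        .notifier_call = foo_transition_cb,
};

static int __init foo_init(void)
{
        return cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit foo_exit(void)
{
        cpufreq_unregister_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
}
#endif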
1752
1753
1754 /*********************************************************************
1755  *                              GOVERNORS                            *
1756  *********************************************************************/
1757
1758 /* Must set freqs->new to intermediate frequency */
1759 static int __target_intermediate(struct cpufreq_policy *policy,
1760                                  struct cpufreq_freqs *freqs, int index)
1761 {
1762         int ret;
1763
1764         freqs->new = cpufreq_driver->get_intermediate(policy, index);
1765
1766         /* We don't need to switch to intermediate freq */
1767         if (!freqs->new)
1768                 return 0;
1769
1770         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1771                  __func__, policy->cpu, freqs->old, freqs->new);
1772
1773         cpufreq_freq_transition_begin(policy, freqs);
1774         ret = cpufreq_driver->target_intermediate(policy, index);
1775         cpufreq_freq_transition_end(policy, freqs, ret);
1776
1777         if (ret)
1778                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1779                        __func__, ret);
1780
1781         return ret;
1782 }
1783
1784 static int __target_index(struct cpufreq_policy *policy,
1785                           struct cpufreq_frequency_table *freq_table, int index)
1786 {
1787         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1788         unsigned int intermediate_freq = 0;
1789         int retval = -EINVAL;
1790         bool notify;
1791
1792         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1793         if (notify) {
1794                 /* Handle switching to intermediate frequency */
1795                 if (cpufreq_driver->get_intermediate) {
1796                         retval = __target_intermediate(policy, &freqs, index);
1797                         if (retval)
1798                                 return retval;
1799
1800                         intermediate_freq = freqs.new;
1801                         /* Set old freq to intermediate */
1802                         if (intermediate_freq)
1803                                 freqs.old = freqs.new;
1804                 }
1805
1806                 freqs.new = freq_table[index].frequency;
1807                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1808                          __func__, policy->cpu, freqs.old, freqs.new);
1809
1810                 cpufreq_freq_transition_begin(policy, &freqs);
1811         }
1812
1813         retval = cpufreq_driver->target_index(policy, index);
1814         if (retval)
1815                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1816                        retval);
1817
1818         if (notify) {
1819                 cpufreq_freq_transition_end(policy, &freqs, retval);
1820
1821                 /*
1822                  * Failed after setting to intermediate freq? Driver should have
1823                  * reverted back to initial frequency and so should we. Check
1824                  * here for intermediate_freq instead of get_intermediate, in
1825                  * case we haven't switched to intermediate freq at all.
1826                  */
1827                 if (unlikely(retval && intermediate_freq)) {
1828                         freqs.old = intermediate_freq;
1829                         freqs.new = policy->restore_freq;
1830                         cpufreq_freq_transition_begin(policy, &freqs);
1831                         cpufreq_freq_transition_end(policy, &freqs, 0);
1832                 }
1833         }
1834
1835         return retval;
1836 }
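
/*
 * Illustrative sketch (not part of this file): a driver whose clock must be
 * reparented to a stable intermediate rate before switching implements
 * ->get_intermediate() and ->target_intermediate(); the core then wraps the
 * extra hop in the notification sequence above. All foo_* names are
 * hypothetical.
 */
#if 0
static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
                                         unsigned int index)
{
        /* Returning 0 tells the core that no intermediate step is needed */
        if (foo_freq_table[index].frequency == FOO_SAFE_KHZ)
                return 0;

        return FOO_SAFE_KHZ;
}

static int foo_target_intermediate(struct cpufreq_policy *policy,
                                   unsigned int index)
{
        return foo_switch_to_safe_parent(policy->cpu);
}
#endif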
1837
1838 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1839                             unsigned int target_freq,
1840                             unsigned int relation)
1841 {
1842         unsigned int old_target_freq = target_freq;
1843         int retval = -EINVAL;
1844
1845         if (cpufreq_disabled())
1846                 return -ENODEV;
1847
1848         /* Make sure that target_freq is within supported range */
1849         if (target_freq > policy->max)
1850                 target_freq = policy->max;
1851         if (target_freq < policy->min)
1852                 target_freq = policy->min;
1853
1854         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1855                  policy->cpu, target_freq, relation, old_target_freq);
1856
1857         /*
1858          * This check may look redundant, since we check again after
1859          * finding the index. It is left in intentionally: when the exact
1860          * same frequency is requested again, it saves a few function
1861          * calls.
1862          */
1863         if (target_freq == policy->cur)
1864                 return 0;
1865
1866         /* Save last value to restore later on errors */
1867         policy->restore_freq = policy->cur;
1868
1869         if (cpufreq_driver->target)
1870                 retval = cpufreq_driver->target(policy, target_freq, relation);
1871         else if (cpufreq_driver->target_index) {
1872                 struct cpufreq_frequency_table *freq_table;
1873                 int index;
1874
1875                 freq_table = cpufreq_frequency_get_table(policy->cpu);
1876                 if (unlikely(!freq_table)) {
1877                         pr_err("%s: Unable to find freq_table\n", __func__);
1878                         goto out;
1879                 }
1880
1881                 retval = cpufreq_frequency_table_target(policy, freq_table,
1882                                 target_freq, relation, &index);
1883                 if (unlikely(retval)) {
1884                         pr_err("%s: Unable to find matching freq\n", __func__);
1885                         goto out;
1886                 }
1887
1888                 if (freq_table[index].frequency == policy->cur) {
1889                         retval = 0;
1890                         goto out;
1891                 }
1892
1893                 retval = __target_index(policy, freq_table, index);
1894         }
1895
1896 out:
1897         return retval;
1898 }
1899 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1900
1901 int cpufreq_driver_target(struct cpufreq_policy *policy,
1902                           unsigned int target_freq,
1903                           unsigned int relation)
1904 {
1905         int ret = -EINVAL;
1906
1907         down_write(&policy->rwsem);
1908
1909         ret = __cpufreq_driver_target(policy, target_freq, relation);
1910
1911         up_write(&policy->rwsem);
1912
1913         return ret;
1914 }
1915 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
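
/*
 * Illustrative sketch (not part of this file): a caller that does not
 * already hold policy->rwsem uses the locked wrapper above.
 * CPUFREQ_RELATION_L selects the lowest table frequency at or above the
 * target, CPUFREQ_RELATION_H the highest at or below it. foo_set_load_freq
 * is a hypothetical name.
 */
#if 0
static int foo_set_load_freq(struct cpufreq_policy *policy, unsigned int load)
{
        unsigned int target = policy->min +
                load * (policy->max - policy->min) / 100;

        return cpufreq_driver_target(policy, target, CPUFREQ_RELATION_L);
}
#endif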
1916
1917 static int __cpufreq_governor(struct cpufreq_policy *policy,
1918                                         unsigned int event)
1919 {
1920         int ret;
1921
1922         /* Must only be defined when the default governor is known to have
1923            latency restrictions, e.g. conservative or ondemand.
1924            That this is the case is already ensured in Kconfig.
1925         */
1926 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1927         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1928 #else
1929         struct cpufreq_governor *gov = NULL;
1930 #endif
1931
1932         /* Don't start any governor operations if we are entering suspend */
1933         if (cpufreq_suspended)
1934                 return 0;
1935         /*
1936          * The governor might not have been initialized yet if an ACPI _PPC
1937          * change notification occurred, so check for it.
1938          */
1939         if (!policy->governor)
1940                 return -EINVAL;
1941
1942         if (policy->governor->max_transition_latency &&
1943             policy->cpuinfo.transition_latency >
1944             policy->governor->max_transition_latency) {
1945                 if (!gov)
1946                         return -EINVAL;
1947                 else {
1948                         pr_warn("%s governor failed: hardware transition latency too long, falling back to %s governor\n",
1949                                 policy->governor->name, gov->name);
1950                         policy->governor = gov;
1951                 }
1952         }
1953
1954         if (event == CPUFREQ_GOV_POLICY_INIT)
1955                 if (!try_module_get(policy->governor->owner))
1956                         return -EINVAL;
1957
1958         pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
1959
1960         mutex_lock(&cpufreq_governor_lock);
1961         if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1962             || (!policy->governor_enabled
1963             && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1964                 mutex_unlock(&cpufreq_governor_lock);
1965                 return -EBUSY;
1966         }
1967
1968         if (event == CPUFREQ_GOV_STOP)
1969                 policy->governor_enabled = false;
1970         else if (event == CPUFREQ_GOV_START)
1971                 policy->governor_enabled = true;
1972
1973         mutex_unlock(&cpufreq_governor_lock);
1974
1975         ret = policy->governor->governor(policy, event);
1976
1977         if (!ret) {
1978                 if (event == CPUFREQ_GOV_POLICY_INIT)
1979                         policy->governor->initialized++;
1980                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1981                         policy->governor->initialized--;
1982         } else {
1983                 /* Restore original values */
1984                 mutex_lock(&cpufreq_governor_lock);
1985                 if (event == CPUFREQ_GOV_STOP)
1986                         policy->governor_enabled = true;
1987                 else if (event == CPUFREQ_GOV_START)
1988                         policy->governor_enabled = false;
1989                 mutex_unlock(&cpufreq_governor_lock);
1990         }
1991
1992         if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1993                         ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1994                 module_put(policy->governor->owner);
1995
1996         return ret;
1997 }
1998
1999 int cpufreq_register_governor(struct cpufreq_governor *governor)
2000 {
2001         int err;
2002
2003         if (!governor)
2004                 return -EINVAL;
2005
2006         if (cpufreq_disabled())
2007                 return -ENODEV;
2008
2009         mutex_lock(&cpufreq_governor_mutex);
2010
2011         governor->initialized = 0;
2012         err = -EBUSY;
2013         if (!find_governor(governor->name)) {
2014                 err = 0;
2015                 list_add(&governor->governor_list, &cpufreq_governor_list);
2016         }
2017
2018         mutex_unlock(&cpufreq_governor_mutex);
2019         return err;
2020 }
2021 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
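
/*
 * Illustrative sketch (not part of this file): a minimal "always max"
 * governor, modelled on the performance governor. Governor callbacks are
 * invoked with policy->rwsem held, so the unlocked __cpufreq_driver_target()
 * is used. The foo names are hypothetical.
 */
#if 0
static int cpufreq_governor_foo(struct cpufreq_policy *policy,
                                unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
        case CPUFREQ_GOV_LIMITS:
                __cpufreq_driver_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);
                break;
        default:
                break;
        }
        return 0;
}

static struct cpufreq_governor cpufreq_gov_foo = {
        .name           = "foo",
        .governor       = cpufreq_governor_foo,
        .owner          = THIS_MODULE,
};

static int __init foo_gov_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_foo);
}
#endif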
2022
2023 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2024 {
2025         struct cpufreq_policy *policy;
2026         unsigned long flags;
2027
2028         if (!governor)
2029                 return;
2030
2031         if (cpufreq_disabled())
2032                 return;
2033
2034         /* clear last_governor for all inactive policies */
2035         read_lock_irqsave(&cpufreq_driver_lock, flags);
2036         for_each_inactive_policy(policy) {
2037                 if (!strcmp(policy->last_governor, governor->name)) {
2038                         policy->governor = NULL;
2039                         strcpy(policy->last_governor, "\0");
2040                 }
2041         }
2042         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2043
2044         mutex_lock(&cpufreq_governor_mutex);
2045         list_del(&governor->governor_list);
2046         mutex_unlock(&cpufreq_governor_mutex);
2048 }
2049 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2050
2051
2052 /*********************************************************************
2053  *                          POLICY INTERFACE                         *
2054  *********************************************************************/
2055
2056 /**
2057  * cpufreq_get_policy - get the current cpufreq_policy
2058  * @policy: struct cpufreq_policy into which the current policy is written
2059  * @cpu: CPU whose policy is requested
2060  *
2061  * Reads the current cpufreq policy.
2062  */
2063 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2064 {
2065         struct cpufreq_policy *cpu_policy;
2066         if (!policy)
2067                 return -EINVAL;
2068
2069         cpu_policy = cpufreq_cpu_get(cpu);
2070         if (!cpu_policy)
2071                 return -EINVAL;
2072
2073         memcpy(policy, cpu_policy, sizeof(*policy));
2074
2075         cpufreq_cpu_put(cpu_policy);
2076         return 0;
2077 }
2078 EXPORT_SYMBOL(cpufreq_get_policy);
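
/*
 * Illustrative sketch (not part of this file): a client takes a snapshot of
 * CPU 0's policy to inspect the active limits without holding a reference
 * afterwards. foo_report_limits is a hypothetical name.
 */
#if 0
static void foo_report_limits(void)
{
        struct cpufreq_policy snap;

        if (!cpufreq_get_policy(&snap, 0))
                pr_info("cpu0 limits: %u - %u kHz\n", snap.min, snap.max);
}
#endif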
2079
2080 /*
2081  * policy : current policy.
2082  * new_policy: policy to be set.
2083  */
2084 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2085                                 struct cpufreq_policy *new_policy)
2086 {
2087         struct cpufreq_governor *old_gov;
2088         int ret;
2089
2090         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2091                  new_policy->cpu, new_policy->min, new_policy->max);
2092
2093         memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2094
2095         /*
2096          * This check works well when we store new min/max freq attributes,
2097          * because new_policy is a copy of policy with one field updated.
2098          */
2099         if (new_policy->min > new_policy->max)
2100                 return -EINVAL;
2101
2102         /* verify the cpu speed can be set within this limit */
2103         ret = cpufreq_driver->verify(new_policy);
2104         if (ret)
2105                 return ret;
2106
2107         /* adjust if necessary - all reasons */
2108         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2109                         CPUFREQ_ADJUST, new_policy);
2110
2111         /*
2112          * verify the cpu speed can be set within this limit, which might be
2113          * different to the first one
2114          */
2115         ret = cpufreq_driver->verify(new_policy);
2116         if (ret)
2117                 return ret;
2118
2119         /* notification of the new policy */
2120         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2121                         CPUFREQ_NOTIFY, new_policy);
2122
2123         policy->min = new_policy->min;
2124         policy->max = new_policy->max;
2125
2126         pr_debug("new min and max freqs are %u - %u kHz\n",
2127                  policy->min, policy->max);
2128
2129         if (cpufreq_driver->setpolicy) {
2130                 policy->policy = new_policy->policy;
2131                 pr_debug("setting range\n");
2132                 return cpufreq_driver->setpolicy(new_policy);
2133         }
2134
2135         if (new_policy->governor == policy->governor)
2136                 goto out;
2137
2138         pr_debug("governor switch\n");
2139
2140         /* save old, working values */
2141         old_gov = policy->governor;
2142         /* end old governor */
2143         if (old_gov) {
2144                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2145                 if (ret) {
2146                         /* This can happen due to race with other operations */
2147                         pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2148                                  __func__, old_gov->name, ret);
2149                         return ret;
2150                 }
2151
2152                 up_write(&policy->rwsem);
2153                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2154                 down_write(&policy->rwsem);
2155
2156                 if (ret) {
2157                         pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2158                                __func__, old_gov->name, ret);
2159                         return ret;
2160                 }
2161         }
2162
2163         /* start new governor */
2164         policy->governor = new_policy->governor;
2165         ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2166         if (!ret) {
2167                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2168                 if (!ret)
2169                         goto out;
2170
2171                 up_write(&policy->rwsem);
2172                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2173                 down_write(&policy->rwsem);
2174         }
2175
2176         /* new governor failed, so re-start old one */
2177         pr_debug("starting governor %s failed\n", policy->governor->name);
2178         if (old_gov) {
2179                 policy->governor = old_gov;
2180                 if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2181                         policy->governor = NULL;
2182                 else
2183                         __cpufreq_governor(policy, CPUFREQ_GOV_START);
2184         }
2185
2186         return ret;
2187
2188  out:
2189         pr_debug("governor: change or update limits\n");
2190         return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2191 }
2192
2193 /**
2194  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
2195  *      @cpu: CPU which shall be re-evaluated
2196  *
2197  *      Useful for policy notifiers which may impose different
2198  *      requirements at different times.
2199  */
2200 int cpufreq_update_policy(unsigned int cpu)
2201 {
2202         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2203         struct cpufreq_policy new_policy;
2204         int ret;
2205
2206         if (!policy)
2207                 return -ENODEV;
2208
2209         down_write(&policy->rwsem);
2210
2211         pr_debug("updating policy for CPU %u\n", cpu);
2212         memcpy(&new_policy, policy, sizeof(*policy));
2213         new_policy.min = policy->user_policy.min;
2214         new_policy.max = policy->user_policy.max;
2215
2216         /*
2217          * BIOS might change freq behind our back
2218          * -> ask driver for current freq and notify governors about a change
2219          */
2220         if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2221                 new_policy.cur = cpufreq_driver->get(cpu);
2222                 if (WARN_ON(!new_policy.cur)) {
2223                         ret = -EIO;
2224                         goto unlock;
2225                 }
2226
2227                 if (!policy->cur) {
2228                         pr_debug("Driver did not initialize current freq\n");
2229                         policy->cur = new_policy.cur;
2230                 } else {
2231                         if (policy->cur != new_policy.cur && has_target())
2232                                 cpufreq_out_of_sync(policy, new_policy.cur);
2233                 }
2234         }
2235
2236         ret = cpufreq_set_policy(policy, &new_policy);
2237
2238 unlock:
2239         up_write(&policy->rwsem);
2240
2241         cpufreq_cpu_put(policy);
2242         return ret;
2243 }
2244 EXPORT_SYMBOL(cpufreq_update_policy);
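
/*
 * Illustrative sketch (not part of this file): platform code that learns of
 * a new limit (e.g. an ACPI _PPC change) nudges the core to re-evaluate the
 * policy of each affected CPU. foo_ppc_changed is a hypothetical name.
 */
#if 0
static void foo_ppc_changed(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                cpufreq_update_policy(cpu);
}
#endif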
2245
2246 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2247                                         unsigned long action, void *hcpu)
2248 {
2249         unsigned int cpu = (unsigned long)hcpu;
2250
2251         switch (action & ~CPU_TASKS_FROZEN) {
2252         case CPU_ONLINE:
2253                 cpufreq_online(cpu);
2254                 break;
2255
2256         case CPU_DOWN_PREPARE:
2257                 cpufreq_offline_prepare(cpu);
2258                 break;
2259
2260         case CPU_POST_DEAD:
2261                 cpufreq_offline_finish(cpu);
2262                 break;
2263
2264         case CPU_DOWN_FAILED:
2265                 cpufreq_online(cpu);
2266                 break;
2267         }
2268         return NOTIFY_OK;
2269 }
2270
2271 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2272         .notifier_call = cpufreq_cpu_callback,
2273 };
2274
2275 /*********************************************************************
2276  *               BOOST                                               *
2277  *********************************************************************/
2278 static int cpufreq_boost_set_sw(int state)
2279 {
2280         struct cpufreq_frequency_table *freq_table;
2281         struct cpufreq_policy *policy;
2282         int ret = -EINVAL;
2283
2284         for_each_active_policy(policy) {
2285                 freq_table = cpufreq_frequency_get_table(policy->cpu);
2286                 if (freq_table) {
2287                         ret = cpufreq_frequency_table_cpuinfo(policy,
2288                                                         freq_table);
2289                         if (ret) {
2290                                 pr_err("%s: Policy frequency update failed\n",
2291                                        __func__);
2292                                 break;
2293                         }
2294                         policy->user_policy.max = policy->max;
2295                         __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2296                 }
2297         }
2298
2299         return ret;
2300 }
2301
2302 int cpufreq_boost_trigger_state(int state)
2303 {
2304         unsigned long flags;
2305         int ret = 0;
2306
2307         if (cpufreq_driver->boost_enabled == state)
2308                 return 0;
2309
2310         write_lock_irqsave(&cpufreq_driver_lock, flags);
2311         cpufreq_driver->boost_enabled = state;
2312         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2313
2314         ret = cpufreq_driver->set_boost(state);
2315         if (ret) {
2316                 write_lock_irqsave(&cpufreq_driver_lock, flags);
2317                 cpufreq_driver->boost_enabled = !state;
2318                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2319
2320                 pr_err("%s: Cannot %s BOOST\n",
2321                        __func__, state ? "enable" : "disable");
2322         }
2323
2324         return ret;
2325 }
2326
2327 int cpufreq_boost_supported(void)
2328 {
2329         if (likely(cpufreq_driver))
2330                 return cpufreq_driver->boost_supported;
2331
2332         return 0;
2333 }
2334 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2335
2336 static int create_boost_sysfs_file(void)
2337 {
2338         int ret;
2339
2340         if (!cpufreq_boost_supported())
2341                 return 0;
2342
2343         /*
2344          * Check if driver provides function to enable boost -
2345          * if not, use cpufreq_boost_set_sw as default
2346          */
2347         if (!cpufreq_driver->set_boost)
2348                 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2349
2350         ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2351         if (ret)
2352                 pr_err("%s: cannot register global BOOST sysfs file\n",
2353                        __func__);
2354
2355         return ret;
2356 }
2357
2358 static void remove_boost_sysfs_file(void)
2359 {
2360         if (cpufreq_boost_supported())
2361                 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2362 }
2363
2364 int cpufreq_enable_boost_support(void)
2365 {
2366         if (!cpufreq_driver)
2367                 return -EINVAL;
2368
2369         if (cpufreq_boost_supported())
2370                 return 0;
2371
2372         cpufreq_driver->boost_supported = true;
2373
2374         /* This will get removed on driver unregister */
2375         return create_boost_sysfs_file();
2376 }
2377 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
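
/*
 * Illustrative sketch (not part of this file): a driver that only discovers
 * boost frequencies at runtime opts in after registering; the core then
 * exposes the global "boost" sysfs file and, absent a driver ->set_boost(),
 * falls back to cpufreq_boost_set_sw(). The foo_* names are hypothetical.
 */
#if 0
static int foo_late_init(void)
{
        if (foo_hw_has_boost())
                return cpufreq_enable_boost_support();

        return 0;
}
#endif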
2378
2379 int cpufreq_boost_enabled(void)
2380 {
2381         return cpufreq_driver->boost_enabled;
2382 }
2383 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2384
2385 /*********************************************************************
2386  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2387  *********************************************************************/
2388
2389 /**
2390  * cpufreq_register_driver - register a CPU Frequency driver
2391  * @driver_data: A struct cpufreq_driver containing the values
2392  * submitted by the CPU Frequency driver.
2393  *
2394  * Registers a CPU Frequency driver to this core code. This code
2395  * returns zero on success, -EBUSY when another driver got here first
2396  * (and isn't unregistered in the meantime).
2397  *
2398  */
2399 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2400 {
2401         unsigned long flags;
2402         int ret;
2403
2404         if (cpufreq_disabled())
2405                 return -ENODEV;
2406
2407         if (!driver_data || !driver_data->verify || !driver_data->init ||
2408             !(driver_data->setpolicy || driver_data->target_index ||
2409                     driver_data->target) ||
2410              (driver_data->setpolicy && (driver_data->target_index ||
2411                     driver_data->target)) ||
2412              (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2413                 return -EINVAL;
2414
2415         pr_debug("trying to register driver %s\n", driver_data->name);
2416
2417         /* Protect against concurrent CPU online/offline. */
2418         get_online_cpus();
2419
2420         write_lock_irqsave(&cpufreq_driver_lock, flags);
2421         if (cpufreq_driver) {
2422                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2423                 ret = -EEXIST;
2424                 goto out;
2425         }
2426         cpufreq_driver = driver_data;
2427         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2428
2429         if (driver_data->setpolicy)
2430                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2431
2432         ret = create_boost_sysfs_file();
2433         if (ret)
2434                 goto err_null_driver;
2435
2436         ret = subsys_interface_register(&cpufreq_interface);
2437         if (ret)
2438                 goto err_boost_unreg;
2439
2440         if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2441             list_empty(&cpufreq_policy_list)) {
2442                 /* if all ->init() calls failed, unregister */
2443                 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2444                          driver_data->name);
2445                 goto err_if_unreg;
2446         }
2447
2448         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2449         pr_debug("driver %s up and running\n", driver_data->name);
2450
2451 out:
2452         put_online_cpus();
2453         return ret;
2454
2455 err_if_unreg:
2456         subsys_interface_unregister(&cpufreq_interface);
2457 err_boost_unreg:
2458         remove_boost_sysfs_file();
2459 err_null_driver:
2460         write_lock_irqsave(&cpufreq_driver_lock, flags);
2461         cpufreq_driver = NULL;
2462         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2463         goto out;
2464 }
2465 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
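
/*
 * Illustrative sketch (not part of this file): the minimum a frequency-table
 * based driver must supply to pass the sanity checks above is ->init,
 * ->verify and exactly one of ->setpolicy / ->target / ->target_index.
 * All foo_* symbols are hypothetical.
 */
#if 0
static struct cpufreq_driver foo_cpufreq_driver = {
        .name           = "foo-cpufreq",
        .init           = foo_cpufreq_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = foo_cpufreq_target_index,
        .get            = foo_cpufreq_get,
};

static int __init foo_cpufreq_probe(void)
{
        return cpufreq_register_driver(&foo_cpufreq_driver);
}
#endif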
2466
2467 /**
2468  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2469  *
2470  * Unregister the current CPUFreq driver. Only call this if you have
2471  * the right to do so, i.e. if you have succeeded in initialising before!
2472  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2473  * currently not initialised.
2474  */
2475 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2476 {
2477         unsigned long flags;
2478
2479         if (!cpufreq_driver || (driver != cpufreq_driver))
2480                 return -EINVAL;
2481
2482         pr_debug("unregistering driver %s\n", driver->name);
2483
2484         /* Protect against concurrent cpu hotplug */
2485         get_online_cpus();
2486         subsys_interface_unregister(&cpufreq_interface);
2487         remove_boost_sysfs_file();
2488         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2489
2490         write_lock_irqsave(&cpufreq_driver_lock, flags);
2491
2492         cpufreq_driver = NULL;
2493
2494         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2495         put_online_cpus();
2496
2497         return 0;
2498 }
2499 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2500
2501 /*
2502  * Stop cpufreq at shutdown to make sure it isn't holding any locks
2503  * or mutexes when secondary CPUs are halted.
2504  */
2505 static struct syscore_ops cpufreq_syscore_ops = {
2506         .shutdown = cpufreq_suspend,
2507 };
2508
2509 struct kobject *cpufreq_global_kobject;
2510 EXPORT_SYMBOL(cpufreq_global_kobject);
2511
2512 static int __init cpufreq_core_init(void)
2513 {
2514         if (cpufreq_disabled())
2515                 return -ENODEV;
2516
2517         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2518         BUG_ON(!cpufreq_global_kobject);
2519
2520         register_syscore_ops(&cpufreq_syscore_ops);
2521
2522         return 0;
2523 }
2524 core_initcall(cpufreq_core_init);