/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>
#include <linux/suspend.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target;
}

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)                                    \
static int lock_policy_rwsem_##mode(int cpu)                            \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
                                                                        \
        return 0;                                                       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)                                  \
static void unlock_policy_rwsem_##mode(int cpu)                         \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
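
/*
 * Illustrative note (added commentary, not in the original source): each
 * invocation of the macros above expands to a real function, so
 * lock_policy_rwsem(read, cpu) defines lock_policy_rwsem_read(int cpu).
 * A caller that only reads the policy would typically do:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read policy fields ...
 *	unlock_policy_rwsem_read(cpu);
 */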

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
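
/*
 * Illustrative note (added commentary): both return values are in
 * microseconds.  When io_busy is non-zero, iowait is treated as busy time
 * and is therefore not added to the idle total; when the NO_HZ based
 * get_cpu_idle_time_us() is unavailable (-1ULL), the jiffy-based fallback
 * above is used instead.  A governor sampling loop might call it as:
 *
 *	u64 wall, idle = get_cpu_idle_time(cpu, &wall, io_is_busy);
 */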

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;


        /* get the CPU */
        data = per_cpu(cpufreq_cpu_data, cpu);

        if (!data)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&data->kobj))
                goto err_out_put_module;

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        if (cpufreq_disabled())
                return NULL;

        return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
        if (!sysfs)
                kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        if (cpufreq_disabled())
                return;

        __cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, true);
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE  && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif
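
/*
 * Illustrative note (added commentary): cpufreq_scale() performs a simple
 * linear rescale, so the update above is effectively
 *
 *	loops_per_jiffy = l_p_j_ref * ci->new / l_p_j_ref_freq;
 *
 * e.g. a reference of 4,000,000 loops recorded at 800000 kHz becomes
 * 2,000,000 loops when the CPU drops to 400000 kHz.
 */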


void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
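
/*
 * Illustrative note (added commentary): a scaling driver's ->target()
 * implementation typically brackets the actual hardware change with the
 * two notifications, roughly:
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_freq;
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	... program the hardware ...
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */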



/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned int ret;                                               \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
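
/*
 * Illustrative note (added commentary): each instantiation above expands
 * to a full sysfs accessor; e.g. show_one(scaling_max_freq, max) defines
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 */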

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /* Do not use cpufreq_set_policy here or the user_policy.max
           will be wrongly overridden */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
                                   struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpufreq_policy *managed_policy;
                struct device *cpu_dev;

                if (j == cpu)
                        continue;

                pr_debug("CPU %u already managed, adding link\n", j);
                managed_policy = cpufreq_cpu_get(cpu);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret) {
                        cpufreq_cpu_put(managed_policy);
                        return ret;
                }
        }
        return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
                                     struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct cpufreq_policy new_policy;
        struct freq_attr **drv_attr;
        unsigned long flags;
        int ret = 0;
        unsigned int j;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        ret = cpufreq_add_dev_symlink(cpu, policy);
        if (ret)
                goto err_out_kobj_put;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
                                  struct device *dev)
{
        struct cpufreq_policy *policy;
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;

        policy = cpufreq_cpu_get(sibling);
        WARN_ON(!policy);

        if (has_target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

        lock_policy_rwsem_write(sibling);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        unlock_policy_rwsem_write(sibling);

        if (has_target) {
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }

        ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
        if (ret) {
                cpufreq_cpu_put(policy);
                return ret;
        }

        return 0;
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_governor *gov;
        int sibling;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_online_cpu(sibling) {
                struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
                if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        return cpufreq_add_policy_cpu(cpu, sibling, dev);
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

        if (!try_module_get(cpufreq_driver->owner)) {
                ret = -EINVAL;
                goto module_out;
        }

        policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
        if (!policy)
                goto nomem_out;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        policy->cpu = cpu;
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* Initially set CPU itself as the policy_cpu */
        per_cpu(cpufreq_policy_cpu, cpu) = cpu;

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        /* related cpus should at least include policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the ones that are online; we are not
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
#endif

        ret = cpufreq_add_dev_interface(cpu, policy, dev);
        if (ret)
                goto err_out_unregister;

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        module_put(cpufreq_driver->owner);
        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);
nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        return ret;
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int j;

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * The policy rwsem for this CPU is taken and released internally as needed;
 * callers must not hold it across this call.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, ret, cpus;
        unsigned long flags;
        struct cpufreq_policy *data;
        struct kobject *kobj;
        struct completion *cmp;
        struct device *cpu_dev;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        data = per_cpu(cpufreq_cpu_data, cpu);
        per_cpu(cpufreq_cpu_data, cpu) = NULL;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!data) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        data->governor->name, CPUFREQ_NAME_LEN);
#endif

        WARN_ON(lock_policy_rwsem_write(cpu));
        cpus = cpumask_weight(data->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, data->cpus);
        unlock_policy_rwsem_write(cpu);

        if (cpu != data->cpu) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                /* first sibling now owns the new sysfs dir */
                cpu_dev = get_cpu_device(cpumask_first(data->cpus));
                sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
                ret = kobject_move(&data->kobj, &cpu_dev->kobj);
                if (ret) {
                        pr_err("%s: Failed to move kobj: %d", __func__, ret);

                        WARN_ON(lock_policy_rwsem_write(cpu));
                        cpumask_set_cpu(cpu, data->cpus);

                        write_lock_irqsave(&cpufreq_driver_lock, flags);
                        per_cpu(cpufreq_cpu_data, cpu) = data;
                        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                        unlock_policy_rwsem_write(cpu);

                        ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
                                        "cpufreq");
                        return -EINVAL;
                }

                WARN_ON(lock_policy_rwsem_write(cpu));
                update_policy_cpu(data, cpu_dev->id);
                unlock_policy_rwsem_write(cpu);
                pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                __func__, cpu_dev->id, cpu);
        }

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (cpufreq_driver->target)
                        __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

                lock_policy_rwsem_read(cpu);
                kobj = &data->kobj;
                cmp = &data->kobj_unregister;
                unlock_policy_rwsem_read(cpu);
                kobject_put(kobj);

                /* we need to make sure that the underlying kobj is actually
                 * not referenced anymore by anybody before we proceed with
                 * unloading.
                 */
                pr_debug("waiting for dropping of refcount\n");
                wait_for_completion(cmp);
                pr_debug("wait complete\n");

                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(data);

                free_cpumask_var(data->related_cpus);
                free_cpumask_var(data->cpus);
                kfree(data);
        } else {
                pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
                cpufreq_cpu_put(data);
                if (cpufreq_driver->target) {
                        __cpufreq_governor(data, CPUFREQ_GOV_START);
                        __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
                }
        }

        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        return 0;
}


static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int retval;

        if (cpu_is_offline(cpu))
                return 0;

        retval = __cpufreq_remove_dev(dev, sif);
        return retval;
}


static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
        unsigned long flags;


        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
               "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

        freqs.old = old_freq;
        freqs.new = new_freq;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);


static unsigned int __cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                 * saved value exists
                 */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of a CPU, as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        unsigned int ret_freq = 0;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                goto out;

        if (unlikely(lock_policy_rwsem_read(cpu)))
                goto out_policy;

        ret_freq = __cpufreq_get(cpu);

        unlock_policy_rwsem_read(cpu);

out_policy:
        cpufreq_cpu_put(policy);
out:
        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};


/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: the devices (e.g. i2c, regulators) used for changing frequency are
 * themselves suspended soon after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                return;

        pr_debug("%s: Suspending Governors\n", __func__);

        policy = cpufreq_cpu_get(0);

        if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                pr_err("%s: Failed to stop governor for policy: %p\n",
                        __func__, policy);
        else if (cpufreq_driver->suspend
            && cpufreq_driver->suspend(policy))
                pr_err("%s: Failed to suspend driver: %p\n", __func__,
                        policy);

        cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for resuming governors
 * that were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        cpufreq_suspended = false;

        policy = cpufreq_cpu_get(0);

        if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
            || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
                pr_err("%s: Failed to start governor for policy: %p\n",
                        __func__, policy);
        else if (cpufreq_driver->resume
            && cpufreq_driver->resume(policy))
                pr_err("%s: Failed to resume driver: %p\n", __func__,
                        policy);

        schedule_work(&policy->update);
}

/**
 *      cpufreq_get_current_driver - return current driver's name
 *
 *      Return the name string of the currently loaded cpufreq driver
 *      or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *      cpufreq_register_notifier - register a driver with cpufreq
 *      @nb: notifier function to register
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Add a driver to one of two lists: either a list of drivers that
 *      are notified about clock rate changes (once before and once after
 *      the transition), or a list of drivers that are notified about
 *      changes in cpufreq policy.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
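
/*
 * Illustrative note (added commentary): a typical transition-notifier
 * client (the names below are hypothetical) would look like:
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *	...
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */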


/**
 *      cpufreq_unregister_notifier - unregister a driver with cpufreq
 *      @nb: notifier block to be unregistered
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Remove a driver from the CPU frequency notifier list.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        int retval = -EINVAL;
        unsigned int old_target_freq = target_freq;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                        policy->cpu, target_freq, relation, old_target_freq);

        if (target_freq == policy->cur)
                return 0;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);

        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
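
/*
 * Illustrative note (added commentary): the clamping above means callers
 * may request any frequency and still stay within policy; e.g. with
 * policy->min = 200000 and policy->max = 1000000 (kHz), a request for
 * 1200000 is silently reduced to 1000000 before the driver sees it.
 */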

int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret = -EINVAL;

        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
                goto no_policy;

        if (unlikely(lock_policy_rwsem_write(policy->cpu)))
                goto fail;

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        unlock_policy_rwsem_write(policy->cpu);

fail:
        cpufreq_cpu_put(policy);
no_policy:
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        if (cpufreq_disabled())
                return ret;

        if (!cpufreq_driver->getavg)
                return 0;

        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
                return -EINVAL;

        ret = cpufreq_driver->getavg(policy, cpu);

        cpufreq_cpu_put(policy);
        return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1574
1575 /*
1576  * Pass a governor event (INIT/EXIT/START/STOP/LIMITS) to policy->governor.
1577  */
1578
1579 static int __cpufreq_governor(struct cpufreq_policy *policy,
1580                                         unsigned int event)
1581 {
1582         int ret;
1583
1584         /* gov must only be defined when the default governor is known to
1585            have latency restrictions, e.g. conservative or ondemand.
1586            Kconfig already ensures that this is the case.
1587         */
1588 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1589         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1590 #else
1591         struct cpufreq_governor *gov = NULL;
1592 #endif
1593
1594         /* Don't start any governor operations if we are entering suspend */
1595         if (cpufreq_suspended)
1596                 return 0;
1597
1598         if (policy->governor->max_transition_latency &&
1599             policy->cpuinfo.transition_latency >
1600             policy->governor->max_transition_latency) {
1601                 if (!gov)
1602                         return -EINVAL;
1603
1604                 printk(KERN_WARNING "%s governor failed: transition latency"
1605                        " of HW too long, falling back to %s governor\n",
1606                        policy->governor->name, gov->name);
1607                 policy->governor = gov;
1611         }
1612
1613         if (!try_module_get(policy->governor->owner))
1614                 return -EINVAL;
1615
1616         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1617                                                 policy->cpu, event);
1618
1619         mutex_lock(&cpufreq_governor_lock);
1620         if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1621             (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1622                 mutex_unlock(&cpufreq_governor_lock);
1623                 return -EBUSY;
1624         }
1625
1626         if (event == CPUFREQ_GOV_STOP)
1627                 policy->governor_enabled = false;
1628         else if (event == CPUFREQ_GOV_START)
1629                 policy->governor_enabled = true;
1630
1631         mutex_unlock(&cpufreq_governor_lock);
1632
1633         ret = policy->governor->governor(policy, event);
1634
1635         if (!ret) {
1636                 if (event == CPUFREQ_GOV_POLICY_INIT)
1637                         policy->governor->initialized++;
1638                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1639                         policy->governor->initialized--;
1640         } else {
1641                 /* Restore original values */
1642                 mutex_lock(&cpufreq_governor_lock);
1643                 if (event == CPUFREQ_GOV_STOP)
1644                         policy->governor_enabled = true;
1645                 else if (event == CPUFREQ_GOV_START)
1646                         policy->governor_enabled = false;
1647                 mutex_unlock(&cpufreq_governor_lock);
1648         }
1649
1650         /* we keep one module reference alive for
1651            each CPU governed by this governor */
1652         if ((event != CPUFREQ_GOV_START) || ret)
1653                 module_put(policy->governor->owner);
1654         if ((event == CPUFREQ_GOV_STOP) && !ret)
1655                 module_put(policy->governor->owner);
1656
1657         return ret;
1658 }
1659
1660
1661 int cpufreq_register_governor(struct cpufreq_governor *governor)
1662 {
1663         int err;
1664
1665         if (!governor)
1666                 return -EINVAL;
1667
1668         if (cpufreq_disabled())
1669                 return -ENODEV;
1670
1671         mutex_lock(&cpufreq_governor_mutex);
1672
1673         governor->initialized = 0;
1674         err = -EBUSY;
1675         if (__find_governor(governor->name) == NULL) {
1676                 err = 0;
1677                 list_add(&governor->governor_list, &cpufreq_governor_list);
1678         }
1679
1680         mutex_unlock(&cpufreq_governor_mutex);
1681         return err;
1682 }
1683 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
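/*
 * Sketch of a minimal governor (illustrative; "my_gov" is hypothetical).
 * The core invokes ->governor() with the CPUFREQ_GOV_* events handled in
 * __cpufreq_governor() above; this one simply pins the policy maximum,
 * much like the performance governor.
 *
 *	static int my_gov_governor(struct cpufreq_policy *policy,
 *				   unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct cpufreq_governor my_gov = {
 *		.name		= "my_gov",
 *		.governor	= my_gov_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_gov);
 *	...
 *	cpufreq_unregister_governor(&my_gov);
 */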
1684
1685
1686 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1687 {
1688 #ifdef CONFIG_HOTPLUG_CPU
1689         int cpu;
1690 #endif
1691
1692         if (!governor)
1693                 return;
1694
1695         if (cpufreq_disabled())
1696                 return;
1697
1698 #ifdef CONFIG_HOTPLUG_CPU
1699         for_each_present_cpu(cpu) {
1700                 if (cpu_online(cpu))
1701                         continue;
1702                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1703                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1704         }
1705 #endif
1706
1707         mutex_lock(&cpufreq_governor_mutex);
1708         list_del(&governor->governor_list);
1709         mutex_unlock(&cpufreq_governor_mutex);
1711 }
1712 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1713
1714
1715
1716 /*********************************************************************
1717  *                          POLICY INTERFACE                         *
1718  *********************************************************************/
1719
1720 /**
1721  * cpufreq_get_policy - get the current cpufreq_policy
1722  * @policy: struct cpufreq_policy into which the current policy is written
1723  * @cpu: CPU whose policy shall be read
1724  *
1725  * Reads the current cpufreq policy of @cpu.
1726  */
1727 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1728 {
1729         struct cpufreq_policy *cpu_policy;
1730         if (!policy)
1731                 return -EINVAL;
1732
1733         cpu_policy = cpufreq_cpu_get(cpu);
1734         if (!cpu_policy)
1735                 return -EINVAL;
1736
1737         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1738
1739         cpufreq_cpu_put(cpu_policy);
1740         return 0;
1741 }
1742 EXPORT_SYMBOL(cpufreq_get_policy);
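/*
 * Usage sketch (illustrative): snapshot CPU0's policy.  The copy is made
 * while the core holds a reference, so the caller may inspect it freely
 * afterwards.
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, 0))
 *		pr_info("cpu0: %u-%u kHz\n", pol.min, pol.max);
 */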
1743
1744
1745 /*
1746  * data   : current policy.
1747  * policy : policy to be set.
1748  */
1749 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1750                                 struct cpufreq_policy *policy)
1751 {
1752         int ret = 0, failed = 1;
1753
1754         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1755                 policy->min, policy->max);
1756
1757         memcpy(&policy->cpuinfo, &data->cpuinfo,
1758                                 sizeof(struct cpufreq_cpuinfo));
1759
1760         if (policy->min > data->max || policy->max < data->min) {
1761                 ret = -EINVAL;
1762                 goto error_out;
1763         }
1764
1765         /* verify the cpu speed can be set within this limit */
1766         ret = cpufreq_driver->verify(policy);
1767         if (ret)
1768                 goto error_out;
1769
1770         /* adjust if necessary - all reasons */
1771         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1772                         CPUFREQ_ADJUST, policy);
1773
1774         /* adjust if necessary - hardware incompatibility*/
1775         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1776                         CPUFREQ_INCOMPATIBLE, policy);
1777
1778         /* verify the cpu speed can be set within this limit,
1779            which might be different from the first one */
1780         ret = cpufreq_driver->verify(policy);
1781         if (ret)
1782                 goto error_out;
1783
1784         /* notification of the new policy */
1785         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1786                         CPUFREQ_NOTIFY, policy);
1787
1788         data->min = policy->min;
1789         data->max = policy->max;
1790
1791         pr_debug("new min and max freqs are %u - %u kHz\n",
1792                                         data->min, data->max);
1793
1794         if (cpufreq_driver->setpolicy) {
1795                 data->policy = policy->policy;
1796                 pr_debug("setting range\n");
1797                 ret = cpufreq_driver->setpolicy(policy);
1798         } else {
1799                 if (policy->governor != data->governor) {
1800                         /* save old, working values */
1801                         struct cpufreq_governor *old_gov = data->governor;
1802
1803                         pr_debug("governor switch\n");
1804
1805                         /* end old governor */
1806                         if (data->governor) {
1807                                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1808                                 unlock_policy_rwsem_write(policy->cpu);
1809                                 __cpufreq_governor(data,
1810                                                 CPUFREQ_GOV_POLICY_EXIT);
1811                                 lock_policy_rwsem_write(policy->cpu);
1812                         }
1813
1814                         /* start new governor */
1815                         data->governor = policy->governor;
1816                         if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1817                                 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1818                                         failed = 0;
1819                                 } else {
1820                                         unlock_policy_rwsem_write(policy->cpu);
1821                                         __cpufreq_governor(data,
1822                                                         CPUFREQ_GOV_POLICY_EXIT);
1823                                         lock_policy_rwsem_write(policy->cpu);
1824                                 }
1825                         }
1826
1827                         if (failed) {
1828                                 /* new governor failed, so re-start old one */
1829                                 pr_debug("starting governor %s failed\n",
1830                                                         data->governor->name);
1831                                 if (old_gov) {
1832                                         data->governor = old_gov;
1833                                         __cpufreq_governor(data,
1834                                                         CPUFREQ_GOV_POLICY_INIT);
1835                                         __cpufreq_governor(data,
1836                                                            CPUFREQ_GOV_START);
1837                                 }
1838                                 ret = -EINVAL;
1839                                 goto error_out;
1840                         }
1841                         /* might be a policy change, too, so fall through */
1842                 }
1843                 pr_debug("governor: change or update limits\n");
1844                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1845         }
1846
1847 error_out:
1848         return ret;
1849 }
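/*
 * Governor-switch sequence used above, in event terms (on failure the old
 * governor is restored and -EINVAL is returned):
 *
 *	GOV_STOP(old) -> GOV_POLICY_EXIT(old) ->
 *	GOV_POLICY_INIT(new) -> GOV_START(new)
 */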
1850
1851 /**
1852  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1853  *      @cpu: CPU which shall be re-evaluated
1854  *
1855  *      Useful for policy notifiers whose requirements change
1856  *      over time and need the policy to be re-evaluated.
1857  */
1858 int cpufreq_update_policy(unsigned int cpu)
1859 {
1860         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1861         struct cpufreq_policy policy;
1862         int ret;
1863
1864         if (!data) {
1865                 ret = -ENODEV;
1866                 goto no_policy;
1867         }
1868
1869         if (unlikely(lock_policy_rwsem_write(cpu))) {
1870                 ret = -EINVAL;
1871                 goto fail;
1872         }
1873
1874         pr_debug("updating policy for CPU %u\n", cpu);
1875         memcpy(&policy, data, sizeof(struct cpufreq_policy));
1876         policy.min = data->user_policy.min;
1877         policy.max = data->user_policy.max;
1878         policy.policy = data->user_policy.policy;
1879         policy.governor = data->user_policy.governor;
1880
1881         /* BIOS might change freq behind our back
1882          * -> ask driver for current freq and notify governors about a change */
1883         if (cpufreq_driver->get) {
1884                 policy.cur = cpufreq_driver->get(cpu);
1885                 if (!data->cur) {
1886                         pr_debug("Driver did not initialize current freq\n");
1887                         data->cur = policy.cur;
1888                 } else {
1889                         if (data->cur != policy.cur && cpufreq_driver->target)
1890                                 cpufreq_out_of_sync(cpu, data->cur,
1891                                                                 policy.cur);
1892                 }
1893         }
1894
1895         ret = __cpufreq_set_policy(data, &policy);
1896
1897         unlock_policy_rwsem_write(cpu);
1898
1899 fail:
1900         cpufreq_cpu_put(data);
1901 no_policy:
1902         return ret;
1903 }
1904 EXPORT_SYMBOL(cpufreq_update_policy);
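/*
 * Usage sketch (illustrative): a driver that has just changed its limits,
 * e.g. after a thermal event, asks the core to re-run the policy notifier
 * chain and re-apply governor limits on every online CPU:
 *
 *	for_each_online_cpu(cpu)
 *		cpufreq_update_policy(cpu);
 */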
1905
1906 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1907                                         unsigned long action, void *hcpu)
1908 {
1909         unsigned int cpu = (unsigned long)hcpu;
1910         struct device *dev;
1911
1912         dev = get_cpu_device(cpu);
1913         if (dev) {
1914                 switch (action) {
1915                 case CPU_ONLINE:
1916                 case CPU_ONLINE_FROZEN:
1917                         cpufreq_add_dev(dev, NULL);
1918                         break;
1919                 case CPU_DOWN_PREPARE:
1920                 case CPU_DOWN_PREPARE_FROZEN:
1921                         __cpufreq_remove_dev(dev, NULL);
1922                         break;
1923                 case CPU_DOWN_FAILED:
1924                 case CPU_DOWN_FAILED_FROZEN:
1925                         cpufreq_add_dev(dev, NULL);
1926                         break;
1927                 }
1928         }
1929         return NOTIFY_OK;
1930 }
1931
1932 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1933         .notifier_call = cpufreq_cpu_callback,
1934 };
1935
1936 /*********************************************************************
1937  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1938  *********************************************************************/
1939
1940 /**
1941  * cpufreq_register_driver - register a CPU Frequency driver
1942  * @driver_data: A struct cpufreq_driver containing the values
1943  * submitted by the CPU Frequency driver.
1944  *
1945  *   Registers a CPU Frequency driver to this core code. This code
1946  * returns zero on success, or -EBUSY when another driver is already
1947  * registered (and has not been unregistered in the meantime).
1948  *
1949  */
1950 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1951 {
1952         unsigned long flags;
1953         int ret;
1954
1955         if (cpufreq_disabled())
1956                 return -ENODEV;
1957
1958         if (!driver_data || !driver_data->verify || !driver_data->init ||
1959             ((!driver_data->setpolicy) && (!driver_data->target)))
1960                 return -EINVAL;
1961
1962         pr_debug("trying to register driver %s\n", driver_data->name);
1963
1964         if (driver_data->setpolicy)
1965                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1966
1967         write_lock_irqsave(&cpufreq_driver_lock, flags);
1968         if (cpufreq_driver) {
1969                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1970                 return -EBUSY;
1971         }
1972         cpufreq_driver = driver_data;
1973         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1974
1975         ret = subsys_interface_register(&cpufreq_interface);
1976         if (ret)
1977                 goto err_null_driver;
1978
1979         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1980                 int i;
1981                 ret = -ENODEV;
1982
1983                 /* check for at least one working CPU */
1984                 for (i = 0; i < nr_cpu_ids; i++)
1985                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1986                                 ret = 0;
1987                                 break;
1988                         }
1989
1990                 /* if all ->init() calls failed, unregister */
1991                 if (ret) {
1992                         pr_debug("no CPU initialized for driver %s\n",
1993                                                         driver_data->name);
1994                         goto err_if_unreg;
1995                 }
1996         }
1997
1998         register_hotcpu_notifier(&cpufreq_cpu_notifier);
1999         pr_debug("driver %s up and running\n", driver_data->name);
2000
2001         return 0;
2002 err_if_unreg:
2003         subsys_interface_unregister(&cpufreq_interface);
2004 err_null_driver:
2005         write_lock_irqsave(&cpufreq_driver_lock, flags);
2006         cpufreq_driver = NULL;
2007         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2008         return ret;
2009 }
2010 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2011
2012
2013 /**
2014  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2015  *
2016  *    Unregister the current CPUFreq driver. Only call this if you have
2017  * the right to do so, i.e. if you registered it successfully before.
2018  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2019  * currently not initialised.
2020  */
2021 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2022 {
2023         unsigned long flags;
2024
2025         if (!cpufreq_driver || (driver != cpufreq_driver))
2026                 return -EINVAL;
2027
2028         pr_debug("unregistering driver %s\n", driver->name);
2029
2030         subsys_interface_unregister(&cpufreq_interface);
2031         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2032
2033         write_lock_irqsave(&cpufreq_driver_lock, flags);
2034         cpufreq_driver = NULL;
2035         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2036
2037         return 0;
2038 }
2039 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
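/*
 * Lifecycle sketch for a hypothetical driver module ("my_driver" and its
 * hooks are illustrative).  ->init and ->verify are mandatory, plus either
 * ->setpolicy or ->target, as checked in cpufreq_register_driver() above.
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "my_driver",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return cpufreq_register_driver(&my_driver);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		cpufreq_unregister_driver(&my_driver);
 *	}
 *
 *	module_init(my_module_init);
 *	module_exit(my_module_exit);
 */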
2040
2041 static int __init cpufreq_core_init(void)
2042 {
2043         int cpu;
2044
2045         if (cpufreq_disabled())
2046                 return -ENODEV;
2047
2048         for_each_possible_cpu(cpu) {
2049                 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2050                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2051         }
2052
2053         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2054         BUG_ON(!cpufreq_global_kobject);
2055
2056         return 0;
2057 }
2058 core_initcall(cpufreq_core_init);