/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>
#include <linux/suspend.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target;
}

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)                                    \
static int lock_policy_rwsem_##mode(int cpu)                            \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
                                                                        \
        return 0;                                                       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)                                  \
static void unlock_policy_rwsem_##mode(int cpu)                         \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
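
/*
 * Illustrative expansion (editor's sketch, not in the original source):
 * for mode "read" the two macro invocations above generate roughly the
 * following pair. The policy-owning CPU is looked up first, so every CPU
 * that shares a policy serializes on the same semaphore:
 *
 *      static int lock_policy_rwsem_read(int cpu)
 *      {
 *              int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 *              BUG_ON(policy_cpu == -1);
 *              down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 *              return 0;
 *      }
 *
 *      static void unlock_policy_rwsem_read(int cpu)
 *      {
 *              int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 *              BUG_ON(policy_cpu == -1);
 *              up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 *      }
 */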

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
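
/*
 * Usage sketch (editor's illustration; variable names are hypothetical):
 * governors typically sample get_cpu_idle_time() twice and turn the
 * deltas into a load percentage, e.g.:
 *
 *      u64 prev_wall, prev_idle, cur_wall, cur_idle, wall, idle;
 *      unsigned int load;
 *
 *      prev_idle = get_cpu_idle_time(cpu, &prev_wall, 0);
 *      ... one sampling period later ...
 *      cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);
 *
 *      wall = cur_wall - prev_wall;    (both values are in microseconds)
 *      idle = cur_idle - prev_idle;
 *      load = 100 * (wall - idle) / wall;
 */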

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;


        /* get the CPU */
        data = per_cpu(cpufreq_cpu_data, cpu);

        if (!data)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&data->kobj))
                goto err_out_put_module;

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        if (cpufreq_disabled())
                return NULL;

        return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
        if (!sysfs)
                kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        if (cpufreq_disabled())
                return;

        __cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, true);
}
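
/*
 * Reference-counting sketch (editor's illustration): every successful
 * cpufreq_cpu_get() pins both the policy kobject and the driver module,
 * so it must be balanced by cpufreq_cpu_put():
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *      if (policy) {
 *              pr_info("cpu%u: %u..%u kHz\n", cpu, policy->min, policy->max);
 *              cpufreq_cpu_put(policy);
 *      }
 */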

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE  && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif
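
/*
 * Arithmetic sketch (editor's note): cpufreq_scale() is a plain linear
 * rescale, so on UP systems the update above amounts to
 *
 *      loops_per_jiffy = l_p_j_ref * ci->new / l_p_j_ref_freq;
 *
 * e.g. a reference of 4000000 loops calibrated at 800000 kHz becomes
 * 5000000 loops after a switch to 1000000 kHz.
 */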


void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
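
/*
 * Transition sketch (editor's illustration; the hardware step is
 * hypothetical): a scaling driver's ->target() callback brackets the
 * actual frequency switch with the two notifications handled above:
 *
 *      struct cpufreq_freqs freqs = {
 *              .old = policy->cur,
 *              .new = <frequency picked from the driver's table>,
 *      };
 *
 *      cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *      ... reprogram PLL / dividers / voltage here ...
 *      cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */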



/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}
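
/*
 * Parsing sketch (editor's illustration): for a ->target() driver,
 * writing "ondemand" to scaling_governor ends up here; if that governor
 * is not registered yet, request_module("cpufreq_ondemand") gives the
 * module loader a chance to provide it before the lookup is retried:
 *
 *      unsigned int pol;
 *      struct cpufreq_governor *gov = NULL;
 *
 *      if (!cpufreq_parse_governor("ondemand", &pol, &gov))
 *              ... gov now points at the registered governor ...
 */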


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
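
/*
 * Expansion sketch (editor's illustration): show_one(scaling_max_freq,
 * max) above generates the following read accessor, which the freq_attr
 * machinery further down wires into sysfs:
 *
 *      static ssize_t show_scaling_max_freq
 *      (struct cpufreq_policy *policy, char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->max);
 *      }
 */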

static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned int ret;                                               \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /* Do not use cpufreq_set_policy here or the user_policy.max
           will be wrongly overridden */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
                                   struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpufreq_policy *managed_policy;
                struct device *cpu_dev;

                if (j == cpu)
                        continue;

                pr_debug("CPU %u already managed, adding link\n", j);
                managed_policy = cpufreq_cpu_get(cpu);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret) {
                        cpufreq_cpu_put(managed_policy);
                        return ret;
                }
        }
        return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
                                     struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct cpufreq_policy new_policy;
        struct freq_attr **drv_attr;
        unsigned long flags;
        int ret = 0;
        unsigned int j;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
        }
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        ret = cpufreq_add_dev_symlink(cpu, policy);
        if (ret)
                goto err_out_kobj_put;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
                                  struct device *dev)
{
        struct cpufreq_policy *policy;
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;

        policy = cpufreq_cpu_get(sibling);
        WARN_ON(!policy);

        if (has_target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

        lock_policy_rwsem_write(sibling);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        unlock_policy_rwsem_write(sibling);

        if (has_target) {
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }

        ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
        if (ret) {
                cpufreq_cpu_put(policy);
                return ret;
        }

        return 0;
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_governor *gov;
        int sibling;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_online_cpu(sibling) {
                struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
                if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        return cpufreq_add_policy_cpu(cpu, sibling, dev);
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

        if (!try_module_get(cpufreq_driver->owner)) {
                ret = -EINVAL;
                goto module_out;
        }

        policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
        if (!policy)
                goto nomem_out;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        policy->cpu = cpu;
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /* Initially set CPU itself as the policy_cpu */
        per_cpu(cpufreq_policy_cpu, cpu) = cpu;

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        /* related cpus should at least include policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
#endif

        ret = cpufreq_add_dev_interface(cpu, policy, dev);
        if (ret)
                goto err_out_unregister;

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        module_put(cpufreq_driver->owner);
        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);
nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        return ret;
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int j;

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, ret, cpus;
        unsigned long flags;
        struct cpufreq_policy *data;
        struct kobject *kobj;
        struct completion *cmp;
        struct device *cpu_dev;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        data = per_cpu(cpufreq_cpu_data, cpu);
        per_cpu(cpufreq_cpu_data, cpu) = NULL;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!data) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        data->governor->name, CPUFREQ_NAME_LEN);
#endif

        WARN_ON(lock_policy_rwsem_write(cpu));
        cpus = cpumask_weight(data->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, data->cpus);
        unlock_policy_rwsem_write(cpu);

        if (cpu != data->cpu) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                /* first sibling now owns the new sysfs dir */
                cpu_dev = get_cpu_device(cpumask_first(data->cpus));
                sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
                ret = kobject_move(&data->kobj, &cpu_dev->kobj);
                if (ret) {
                        pr_err("%s: Failed to move kobj: %d", __func__, ret);

                        WARN_ON(lock_policy_rwsem_write(cpu));
                        cpumask_set_cpu(cpu, data->cpus);

                        write_lock_irqsave(&cpufreq_driver_lock, flags);
                        per_cpu(cpufreq_cpu_data, cpu) = data;
                        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                        unlock_policy_rwsem_write(cpu);

                        ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
                                        "cpufreq");
                        return -EINVAL;
                }

                WARN_ON(lock_policy_rwsem_write(cpu));
                update_policy_cpu(data, cpu_dev->id);
                unlock_policy_rwsem_write(cpu);
                pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                __func__, cpu_dev->id, cpu);
        }

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (cpufreq_driver->target)
                        __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

                lock_policy_rwsem_read(cpu);
                kobj = &data->kobj;
                cmp = &data->kobj_unregister;
                unlock_policy_rwsem_read(cpu);
                kobject_put(kobj);

                /* we need to make sure that the underlying kobj is actually
                 * not referenced anymore by anybody before we proceed with
                 * unloading.
                 */
                pr_debug("waiting for dropping of refcount\n");
                wait_for_completion(cmp);
                pr_debug("wait complete\n");

                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(data);

                free_cpumask_var(data->related_cpus);
                free_cpumask_var(data->cpus);
                kfree(data);
        } else {
                pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
                cpufreq_cpu_put(data);
                if (cpufreq_driver->target) {
                        __cpufreq_governor(data, CPUFREQ_GOV_START);
                        __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
                }
        }

        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        return 0;
}


static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int retval;

        if (cpu_is_offline(cpu))
                return 0;

        retval = __cpufreq_remove_dev(dev, sif);
        return retval;
}


static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}
/**
 *      cpufreq_out_of_sync - If actual and saved CPU frequency differ,
 *      we're in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
        unsigned long flags;


        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
               "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

        freqs.old = old_freq;
        freqs.new = new_freq;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);


static unsigned int __cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                                        saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        unsigned int ret_freq = 0;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                goto out;

        if (unlikely(lock_policy_rwsem_read(cpu)))
                goto out_policy;

        ret_freq = __cpufreq_get(cpu);

        unlock_policy_rwsem_read(cpu);

out_policy:
        cpufreq_cpu_put(policy);
out:
        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
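
/*
 * Usage sketch (editor's illustration): the two getters trade accuracy
 * for cost. cpufreq_quick_get() only reports the cached policy->cur,
 * while cpufreq_get() asks the driver and may trigger the out-of-sync
 * fixup above:
 *
 *      unsigned int cached_khz = cpufreq_quick_get(cpu);
 *      unsigned int hw_khz = cpufreq_get(cpu);   (0 if unavailable)
 */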

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};


/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: some of the devices (e.g. i2c, regulators) used for changing
 * frequency are themselves suspended quickly afterwards.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;
        int cpu;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                return;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_possible_cpu(cpu) {
                if (!cpu_online(cpu))
                        continue;

                policy = cpufreq_cpu_get(cpu);

                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
                else if (cpufreq_driver->suspend
                    && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }

        cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;
        int cpu;

        if (!cpufreq_driver)
                return;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        cpufreq_suspended = false;

        for_each_possible_cpu(cpu) {
                if (!cpu_online(cpu))
                        continue;

                policy = cpufreq_cpu_get(cpu);

                if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
                    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
                        pr_err("%s: Failed to start governor for policy: %p\n",
                                __func__, policy);
                else if (cpufreq_driver->resume
                    && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);

                /*
                 * Schedule a call to cpufreq_update_policy() for the boot
                 * CPU, i.e. the last policy in the list. It will verify that
                 * the current freq is in sync with what we believe it to be.
                 */
                if (cpu == 0)
                        schedule_work(&policy->update);
        }
}

/**
 *      cpufreq_get_current_driver - return current driver's name
 *
 *      Return the name string of the currently loaded cpufreq driver
 *      or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *      cpufreq_register_notifier - register a driver with cpufreq
 *      @nb: notifier function to register
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Add a driver to one of two lists: either a list of drivers that
 *      are notified about clock rate changes (once before and once after
 *      the transition), or a list of drivers that are notified about
 *      changes in cpufreq policy.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        WARN_ON(!init_cpufreq_transition_notifier_list_called);

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
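
/*
 * Registration sketch (editor's illustration; the callback body is
 * hypothetical): a client that must react to frequency changes registers
 * a transition notifier and is called twice per change, with a
 * struct cpufreq_freqs pointer as data:
 *
 *      static int my_freq_notifier(struct notifier_block *nb,
 *                                  unsigned long val, void *data)
 *      {
 *              struct cpufreq_freqs *freqs = data;
 *
 *              if (val == CPUFREQ_POSTCHANGE)
 *                      pr_info("cpu%u now at %u kHz\n",
 *                              freqs->cpu, freqs->new);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_freq_nb = {
 *              .notifier_call = my_freq_notifier,
 *      };
 *
 *      cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */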


/**
 *      cpufreq_unregister_notifier - unregister a driver with cpufreq
 *      @nb: notifier block to be unregistered
 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *      Remove a driver from the CPU frequency notifier list.
 *
 *      This function may sleep, and has the same return conditions as
 *      blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        int retval = -EINVAL;
        unsigned int old_target_freq = target_freq;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        if (target_freq > policy->max)
                target_freq = policy->max;
        if (target_freq < policy->min)
                target_freq = policy->min;

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                        policy->cpu, target_freq, relation, old_target_freq);

        if (target_freq == policy->cur)
                return 0;

        if (cpufreq_driver->target)
                retval = cpufreq_driver->target(policy, target_freq, relation);

        return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret = -EINVAL;

        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
                goto no_policy;

        if (unlikely(lock_policy_rwsem_write(policy->cpu)))
                goto fail;

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        unlock_policy_rwsem_write(policy->cpu);

fail:
        cpufreq_cpu_put(policy);
no_policy:
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1571
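/**
 * __cpufreq_driver_getavg - ask the driver for its average-frequency estimate
 * @policy: policy the estimate is wanted for
 * @cpu: CPU to query
 *
 * Returns 0 when cpufreq is disabled or the driver provides no getavg()
 * callback, -EINVAL when the policy cannot be pinned, and the driver's
 * estimate otherwise.
 */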
1572 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1573 {
1574         int ret = 0;
1575
1576         if (cpufreq_disabled())
1577                 return ret;
1578
1579         if (!cpufreq_driver->getavg)
1580                 return 0;
1581
1582         policy = cpufreq_cpu_get(policy->cpu);
1583         if (!policy)
1584                 return -EINVAL;
1585
1586         ret = cpufreq_driver->getavg(policy, cpu);
1587
1588         cpufreq_cpu_put(policy);
1589         return ret;
1590 }
1591 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1592
1593 /*
1594  * __cpufreq_governor - pass @event (e.g. CPUFREQ_GOV_LIMITS) to the governor
1595  */
1596
1597 static int __cpufreq_governor(struct cpufreq_policy *policy,
1598                                         unsigned int event)
1599 {
1600         int ret;
1601
1602         /* A fallback governor must only be defined when the default
1603            governor is known to have latency restrictions, e.g.
1604            conservative or ondemand. Kconfig already ensures that this
1605            is the case. */
1606 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1607         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1608 #else
1609         struct cpufreq_governor *gov = NULL;
1610 #endif
1611
1612         /* Don't start any governor operations if we are entering suspend */
1613         if (cpufreq_suspended)
1614                 return 0;
1615
1616         if (policy->governor->max_transition_latency &&
1617             policy->cpuinfo.transition_latency >
1618             policy->governor->max_transition_latency) {
1619                 if (!gov)
1620                         return -EINVAL;
1621                 else {
1622                         printk(KERN_WARNING "%s governor failed: HW"
1623                                " transition latency too long, falling"
1624                                " back to %s governor\n",
1625                                policy->governor->name,
1626                                gov->name);
1627                         policy->governor = gov;
1628                 }
1629         }
1630
1631         if (!try_module_get(policy->governor->owner))
1632                 return -EINVAL;
1633
1634         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1635                                                 policy->cpu, event);
1636
1637         mutex_lock(&cpufreq_governor_lock);
1638         if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1639             (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1640                 mutex_unlock(&cpufreq_governor_lock);
1641                 return -EBUSY;
1642         }
1643
1644         if (event == CPUFREQ_GOV_STOP)
1645                 policy->governor_enabled = false;
1646         else if (event == CPUFREQ_GOV_START)
1647                 policy->governor_enabled = true;
1648
1649         mutex_unlock(&cpufreq_governor_lock);
1650
1651         ret = policy->governor->governor(policy, event);
1652
1653         if (!ret) {
1654                 if (event == CPUFREQ_GOV_POLICY_INIT)
1655                         policy->governor->initialized++;
1656                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1657                         policy->governor->initialized--;
1658         } else {
1659                 /* Restore original values */
1660                 mutex_lock(&cpufreq_governor_lock);
1661                 if (event == CPUFREQ_GOV_STOP)
1662                         policy->governor_enabled = true;
1663                 else if (event == CPUFREQ_GOV_START)
1664                         policy->governor_enabled = false;
1665                 mutex_unlock(&cpufreq_governor_lock);
1666         }
1667
1668         /* we keep one module reference alive for
1669            each policy this governor manages */
1670         if ((event != CPUFREQ_GOV_START) || ret)
1671                 module_put(policy->governor->owner);
1672         if ((event == CPUFREQ_GOV_STOP) && !ret)
1673                 module_put(policy->governor->owner);
1674
1675         return ret;
1676 }
1677
1678
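/**
 * cpufreq_register_governor - add a governor to the list of known governors
 * @governor: governor to register
 *
 * Returns 0 on success, -EINVAL for a NULL @governor, -ENODEV when
 * cpufreq is disabled, and -EBUSY when a governor of the same name is
 * already registered.
 */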
1679 int cpufreq_register_governor(struct cpufreq_governor *governor)
1680 {
1681         int err;
1682
1683         if (!governor)
1684                 return -EINVAL;
1685
1686         if (cpufreq_disabled())
1687                 return -ENODEV;
1688
1689         mutex_lock(&cpufreq_governor_mutex);
1690
1691         governor->initialized = 0;
1692         err = -EBUSY;
1693         if (__find_governor(governor->name) == NULL) {
1694                 err = 0;
1695                 list_add(&governor->governor_list, &cpufreq_governor_list);
1696         }
1697
1698         mutex_unlock(&cpufreq_governor_mutex);
1699         return err;
1700 }
1701 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
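/*
 * Example (illustrative; all names below are hypothetical): a governor
 * module typically registers itself from its init path:
 *
 *	static struct cpufreq_governor cpufreq_gov_mygov = {
 *		.name		= "mygov",
 *		.governor	= mygov_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init mygov_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_mygov);
 *	}
 */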
1702
1703
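/**
 * cpufreq_unregister_governor - remove a governor from the governor list
 * @governor: governor to unregister
 *
 * Also clears the saved governor name of any offline CPU whose last
 * governor was @governor, so it is not restored on the next hotplug.
 */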
1704 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1705 {
1706 #ifdef CONFIG_HOTPLUG_CPU
1707         int cpu;
1708 #endif
1709
1710         if (!governor)
1711                 return;
1712
1713         if (cpufreq_disabled())
1714                 return;
1715
1716 #ifdef CONFIG_HOTPLUG_CPU
1717         for_each_present_cpu(cpu) {
1718                 if (cpu_online(cpu))
1719                         continue;
1720                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1721                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1722         }
1723 #endif
1724
1725         mutex_lock(&cpufreq_governor_mutex);
1726         list_del(&governor->governor_list);
1727         mutex_unlock(&cpufreq_governor_mutex);
1729 }
1730 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1731
1732
1733
1734 /*********************************************************************
1735  *                          POLICY INTERFACE                         *
1736  *********************************************************************/
1737
1738 /**
1739  * cpufreq_get_policy - get the current cpufreq_policy
1740  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1741  *      is written
1742  * @cpu: CPU whose current policy is requested
1743  * Reads the current cpufreq policy of @cpu.
1744  */
1745 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1746 {
1747         struct cpufreq_policy *cpu_policy;
1748         if (!policy)
1749                 return -EINVAL;
1750
1751         cpu_policy = cpufreq_cpu_get(cpu);
1752         if (!cpu_policy)
1753                 return -EINVAL;
1754
1755         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1756
1757         cpufreq_cpu_put(cpu_policy);
1758         return 0;
1759 }
1760 EXPORT_SYMBOL(cpufreq_get_policy);
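/*
 * Example (illustrative): take a snapshot of CPU0's limits.
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, 0))
 *		pr_info("cpu0: %u - %u kHz\n", pol.min, pol.max);
 */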
1761
1762
1763 /*
1764  * data   : current (live) policy.
1765  * policy : new policy to apply after verification and notifier review.
1766  */
1767 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1768                                 struct cpufreq_policy *policy)
1769 {
1770         int ret = 0, failed = 1;
1771
1772         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1773                 policy->min, policy->max);
1774
1775         memcpy(&policy->cpuinfo, &data->cpuinfo,
1776                                 sizeof(struct cpufreq_cpuinfo));
1777
1778         if (policy->min > data->max || policy->max < data->min) {
1779                 ret = -EINVAL;
1780                 goto error_out;
1781         }
1782
1783         /* verify the cpu speed can be set within this limit */
1784         ret = cpufreq_driver->verify(policy);
1785         if (ret)
1786                 goto error_out;
1787
1788         /* adjust if necessary - all reasons */
1789         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1790                         CPUFREQ_ADJUST, policy);
1791
1792         /* adjust if necessary - hardware incompatibility*/
1793         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1794                         CPUFREQ_INCOMPATIBLE, policy);
1795
1796         /* verify the cpu speed can be set within this limit,
1797            which might be different from the first one */
1798         ret = cpufreq_driver->verify(policy);
1799         if (ret)
1800                 goto error_out;
1801
1802         /* notification of the new policy */
1803         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1804                         CPUFREQ_NOTIFY, policy);
1805
1806         data->min = policy->min;
1807         data->max = policy->max;
1808
1809         pr_debug("new min and max freqs are %u - %u kHz\n",
1810                                         data->min, data->max);
1811
1812         if (cpufreq_driver->setpolicy) {
1813                 data->policy = policy->policy;
1814                 pr_debug("setting range\n");
1815                 ret = cpufreq_driver->setpolicy(policy);
1816         } else {
1817                 if (policy->governor != data->governor) {
1818                         /* save old, working values */
1819                         struct cpufreq_governor *old_gov = data->governor;
1820
1821                         pr_debug("governor switch\n");
1822
1823                         /* end old governor */
1824                         if (data->governor) {
1825                                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1826                                 unlock_policy_rwsem_write(policy->cpu);
1827                                 __cpufreq_governor(data,
1828                                                 CPUFREQ_GOV_POLICY_EXIT);
1829                                 lock_policy_rwsem_write(policy->cpu);
1830                         }
1831
1832                         /* start new governor */
1833                         data->governor = policy->governor;
1834                         if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1835                                 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1836                                         failed = 0;
1837                                 } else {
1838                                         unlock_policy_rwsem_write(policy->cpu);
1839                                         __cpufreq_governor(data,
1840                                                         CPUFREQ_GOV_POLICY_EXIT);
1841                                         lock_policy_rwsem_write(policy->cpu);
1842                                 }
1843                         }
1844
1845                         if (failed) {
1846                                 /* new governor failed, so re-start old one */
1847                                 pr_debug("starting governor %s failed\n",
1848                                                         data->governor->name);
1849                                 if (old_gov) {
1850                                         data->governor = old_gov;
1851                                         __cpufreq_governor(data,
1852                                                         CPUFREQ_GOV_POLICY_INIT);
1853                                         __cpufreq_governor(data,
1854                                                            CPUFREQ_GOV_START);
1855                                 }
1856                                 ret = -EINVAL;
1857                                 goto error_out;
1858                         }
1859                         /* might be a policy change, too, so fall through */
1860                 }
1861                 pr_debug("governor: change or update limits\n");
1862                 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1863         }
1864
1865 error_out:
1866         return ret;
1867 }
1868
1869 /**
1870  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1871  *      @cpu: CPU which shall be re-evaluated
1872  *
1873  *      Useful for policy notifiers whose requirements change
1874  *      over time.
1875  */
1876 int cpufreq_update_policy(unsigned int cpu)
1877 {
1878         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1879         struct cpufreq_policy policy;
1880         int ret;
1881
1882         if (!data) {
1883                 ret = -ENODEV;
1884                 goto no_policy;
1885         }
1886
1887         if (unlikely(lock_policy_rwsem_write(cpu))) {
1888                 ret = -EINVAL;
1889                 goto fail;
1890         }
1891
1892         pr_debug("updating policy for CPU %u\n", cpu);
1893         memcpy(&policy, data, sizeof(struct cpufreq_policy));
1894         policy.min = data->user_policy.min;
1895         policy.max = data->user_policy.max;
1896         policy.policy = data->user_policy.policy;
1897         policy.governor = data->user_policy.governor;
1898
1899         /* BIOS might change freq behind our back
1900            -> ask driver for current freq and notify governors about a change */
1901         if (cpufreq_driver->get) {
1902                 policy.cur = cpufreq_driver->get(cpu);
1903                 if (!data->cur) {
1904                         pr_debug("Driver did not initialize current freq\n");
1905                         data->cur = policy.cur;
1906                 } else {
1907                         if (data->cur != policy.cur && cpufreq_driver->target)
1908                                 cpufreq_out_of_sync(cpu, data->cur,
1909                                                                 policy.cur);
1910                 }
1911         }
1912
1913         ret = __cpufreq_set_policy(data, &policy);
1914
1915         unlock_policy_rwsem_write(cpu);
1916
1917 fail:
1918         cpufreq_cpu_put(data);
1919 no_policy:
1920         return ret;
1921 }
1922 EXPORT_SYMBOL(cpufreq_update_policy);
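/*
 * Example (illustrative): a driver that has just changed an internal
 * frequency cap would call this so its CPUFREQ_ADJUST policy notifier
 * (run from __cpufreq_set_policy()) can clamp policy->max again:
 *
 *	cpufreq_update_policy(cpu);
 */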
1923
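/*
 * cpufreq_cpu_callback - CPU hotplug notifier: (re)create the cpufreq
 * interface when a CPU comes online or fails to go down, and tear it
 * down while the CPU is being taken offline.
 */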
1924 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1925                                         unsigned long action, void *hcpu)
1926 {
1927         unsigned int cpu = (unsigned long)hcpu;
1928         struct device *dev;
1929
1930         dev = get_cpu_device(cpu);
1931         if (dev) {
1932                 switch (action) {
1933                 case CPU_ONLINE:
1934                 case CPU_ONLINE_FROZEN:
1935                         cpufreq_add_dev(dev, NULL);
1936                         break;
1937                 case CPU_DOWN_PREPARE:
1938                 case CPU_DOWN_PREPARE_FROZEN:
1939                         __cpufreq_remove_dev(dev, NULL);
1940                         break;
1941                 case CPU_DOWN_FAILED:
1942                 case CPU_DOWN_FAILED_FROZEN:
1943                         cpufreq_add_dev(dev, NULL);
1944                         break;
1945                 }
1946         }
1947         return NOTIFY_OK;
1948 }
1949
1950 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1951         .notifier_call = cpufreq_cpu_callback,
1952 };
1953
1954 /*********************************************************************
1955  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1956  *********************************************************************/
1957
1958 /**
1959  * cpufreq_register_driver - register a CPU Frequency driver
1960  * @driver_data: A struct cpufreq_driver containing the values
1961  * submitted by the CPU Frequency driver.
1962  *
1963  *   Registers a CPU Frequency driver with this core code. Returns
1964  * zero on success, -EBUSY when another driver is already registered
1965  * (and has not been unregistered in the meantime).
1966  *
1967  */
1968 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1969 {
1970         unsigned long flags;
1971         int ret;
1972
1973         if (cpufreq_disabled())
1974                 return -ENODEV;
1975
1976         if (!driver_data || !driver_data->verify || !driver_data->init ||
1977             ((!driver_data->setpolicy) && (!driver_data->target)))
1978                 return -EINVAL;
1979
1980         pr_debug("trying to register driver %s\n", driver_data->name);
1981
1982         if (driver_data->setpolicy)
1983                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1984
1985         write_lock_irqsave(&cpufreq_driver_lock, flags);
1986         if (cpufreq_driver) {
1987                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1988                 return -EBUSY;
1989         }
1990         cpufreq_driver = driver_data;
1991         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1992
1993         ret = subsys_interface_register(&cpufreq_interface);
1994         if (ret)
1995                 goto err_null_driver;
1996
1997         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1998                 int i;
1999                 ret = -ENODEV;
2000
2001                 /* check for at least one working CPU */
2002                 for (i = 0; i < nr_cpu_ids; i++)
2003                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2004                                 ret = 0;
2005                                 break;
2006                         }
2007
2008                 /* if all ->init() calls failed, unregister */
2009                 if (ret) {
2010                         pr_debug("no CPU initialized for driver %s\n",
2011                                                         driver_data->name);
2012                         goto err_if_unreg;
2013                 }
2014         }
2015
2016         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2017         pr_debug("driver %s up and running\n", driver_data->name);
2018
2019         return 0;
2020 err_if_unreg:
2021         subsys_interface_unregister(&cpufreq_interface);
2022 err_null_driver:
2023         write_lock_irqsave(&cpufreq_driver_lock, flags);
2024         cpufreq_driver = NULL;
2025         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2026         return ret;
2027 }
2028 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
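/*
 * Example (illustrative; all names below are hypothetical): skeleton
 * registration from a platform cpufreq driver's init path:
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name	= "mydrv",
 *		.init	= my_cpu_init,
 *		.verify	= my_verify,
 *		.target	= my_target,
 *		.get	= my_get,
 *	};
 *
 *	static int __init my_driver_init(void)
 *	{
 *		return cpufreq_register_driver(&my_cpufreq_driver);
 *	}
 */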
2029
2030
2031 /**
2032  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2033  *
2034  *    Unregister the current CPUFreq driver. Only call this if you have
2035  * the right to do so, i.e. if you have succeeded in initialising before!
2036  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2037  * currently not initialised.
2038  */
2039 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2040 {
2041         unsigned long flags;
2042
2043         if (!cpufreq_driver || (driver != cpufreq_driver))
2044                 return -EINVAL;
2045
2046         pr_debug("unregistering driver %s\n", driver->name);
2047
2048         subsys_interface_unregister(&cpufreq_interface);
2049         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2050
2051         write_lock_irqsave(&cpufreq_driver_lock, flags);
2052         cpufreq_driver = NULL;
2053         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2054
2055         return 0;
2056 }
2057 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2058
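/*
 * cpufreq_core_init - early (core_initcall) setup: initialize the
 * per-CPU policy bookkeeping and create the global "cpufreq" kobject
 * under /sys/devices/system/cpu before any driver can register.
 */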
2059 static int __init cpufreq_core_init(void)
2060 {
2061         int cpu;
2062
2063         if (cpufreq_disabled())
2064                 return -ENODEV;
2065
2066         for_each_possible_cpu(cpu) {
2067                 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2068                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2069         }
2070
2071         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2072         BUG_ON(!cpufreq_global_kobject);
2073
2074         return 0;
2075 }
2076 core_initcall(cpufreq_core_init);