/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/notifier.h>
24 #include <linux/cpufreq.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/device.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/completion.h>
32 #include <linux/mutex.h>
33 #include <linux/syscore_ops.h>
34 #include <linux/suspend.h>
35 #include <linux/tick.h>
37 #include <trace/events/power.h>
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
44 static struct cpufreq_driver *cpufreq_driver;
45 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
46 #ifdef CONFIG_HOTPLUG_CPU
47 /* This one keeps track of the previously set governor of a removed CPU */
48 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
50 static DEFINE_RWLOCK(cpufreq_driver_lock);
51 static DEFINE_MUTEX(cpufreq_governor_lock);
53 /* Flag to suspend/resume CPUFreq governors */
54 static bool cpufreq_suspended;
56 static inline bool has_target(void)
58 return cpufreq_driver->target;
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
78 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
79 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
81 #define lock_policy_rwsem(mode, cpu) \
82 static int lock_policy_rwsem_##mode(int cpu) \
84 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
85 BUG_ON(policy_cpu == -1); \
86 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
91 lock_policy_rwsem(read, cpu);
92 lock_policy_rwsem(write, cpu);
94 #define unlock_policy_rwsem(mode, cpu) \
95 static void unlock_policy_rwsem_##mode(int cpu) \
97 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
98 BUG_ON(policy_cpu == -1); \
99 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
102 unlock_policy_rwsem(read, cpu);
103 unlock_policy_rwsem(write, cpu);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
118 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
119 static struct srcu_notifier_head cpufreq_transition_notifier_list;
121 static bool init_cpufreq_transition_notifier_list_called;
122 static int __init init_cpufreq_transition_notifier_list(void)
124 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
125 init_cpufreq_transition_notifier_list_called = true;
128 pure_initcall(init_cpufreq_transition_notifier_list);
130 static int off __read_mostly;
131 static int cpufreq_disabled(void)
135 void disable_cpufreq(void)
139 static LIST_HEAD(cpufreq_governor_list);
140 static DEFINE_MUTEX(cpufreq_governor_mutex);
142 bool have_governor_per_policy(void)
144 return cpufreq_driver->have_governor_per_policy;
147 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
149 struct cpufreq_policy *data;
152 if (cpu >= nr_cpu_ids)
155 /* get the cpufreq driver */
156 read_lock_irqsave(&cpufreq_driver_lock, flags);
161 if (!try_module_get(cpufreq_driver->owner))
166 data = per_cpu(cpufreq_cpu_data, cpu);
169 goto err_out_put_module;
171 if (!sysfs && !kobject_get(&data->kobj))
172 goto err_out_put_module;
174 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
178 module_put(cpufreq_driver->owner);
180 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
185 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
187 if (cpufreq_disabled())
190 return __cpufreq_cpu_get(cpu, false);
192 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
194 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
196 return __cpufreq_cpu_get(cpu, true);
199 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
202 kobject_put(&data->kobj);
203 module_put(cpufreq_driver->owner);
206 void cpufreq_cpu_put(struct cpufreq_policy *data)
208 if (cpufreq_disabled())
211 __cpufreq_cpu_put(data, false);
213 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
215 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
217 __cpufreq_cpu_put(data, true);
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
233 static unsigned long l_p_j_ref;
234 static unsigned int l_p_j_ref_freq;
236 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
238 if (ci->flags & CPUFREQ_CONST_LOOPS)
241 if (!l_p_j_ref_freq) {
242 l_p_j_ref = loops_per_jiffy;
243 l_p_j_ref_freq = ci->old;
244 pr_debug("saving %lu as reference value for loops_per_jiffy; "
245 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
247 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
248 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
249 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
251 pr_debug("scaling loops_per_jiffy to %lu "
252 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
256 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
263 void __cpufreq_notify_transition(struct cpufreq_policy *policy,
264 struct cpufreq_freqs *freqs, unsigned int state)
266 BUG_ON(irqs_disabled());
268 if (cpufreq_disabled())
271 freqs->flags = cpufreq_driver->flags;
272 pr_debug("notification %u of frequency transition to %u kHz\n",
277 case CPUFREQ_PRECHANGE:
278 /* detect if the driver reported a value as "old frequency"
279 * which is not equal to what the cpufreq core thinks is
282 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
283 if ((policy) && (policy->cpu == freqs->cpu) &&
284 (policy->cur) && (policy->cur != freqs->old)) {
285 pr_debug("Warning: CPU frequency is"
286 " %u, cpufreq assumed %u kHz.\n",
287 freqs->old, policy->cur);
288 freqs->old = policy->cur;
291 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
292 CPUFREQ_PRECHANGE, freqs);
293 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
296 case CPUFREQ_POSTCHANGE:
297 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
298 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
299 (unsigned long)freqs->cpu);
300 trace_cpu_frequency(freqs->new, freqs->cpu);
301 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
302 CPUFREQ_POSTCHANGE, freqs);
303 if (likely(policy) && likely(policy->cpu == freqs->cpu))
304 policy->cur = freqs->new;
309 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
310 * on frequency transition.
312 * This function calls the transition notifiers and the "adjust_jiffies"
313 * function. It is called twice on all CPU frequency changes that have
316 void cpufreq_notify_transition(struct cpufreq_policy *policy,
317 struct cpufreq_freqs *freqs, unsigned int state)
319 for_each_cpu(freqs->cpu, policy->cpus)
320 __cpufreq_notify_transition(policy, freqs, state);
322 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
330 static struct cpufreq_governor *__find_governor(const char *str_governor)
332 struct cpufreq_governor *t;
334 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
335 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
342 * cpufreq_parse_governor - parse a governor string
344 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
345 struct cpufreq_governor **governor)
352 if (cpufreq_driver->setpolicy) {
353 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
354 *policy = CPUFREQ_POLICY_PERFORMANCE;
356 } else if (!strnicmp(str_governor, "powersave",
358 *policy = CPUFREQ_POLICY_POWERSAVE;
361 } else if (cpufreq_driver->target) {
362 struct cpufreq_governor *t;
364 mutex_lock(&cpufreq_governor_mutex);
366 t = __find_governor(str_governor);
371 mutex_unlock(&cpufreq_governor_mutex);
372 ret = request_module("cpufreq_%s", str_governor);
373 mutex_lock(&cpufreq_governor_mutex);
376 t = __find_governor(str_governor);
384 mutex_unlock(&cpufreq_governor_mutex);
392 * cpufreq_per_cpu_attr_read() / show_##file_name() -
393 * print out cpufreq information
395 * Write out information from cpufreq_driver->policy[cpu]; object must be
399 #define show_one(file_name, object) \
400 static ssize_t show_##file_name \
401 (struct cpufreq_policy *policy, char *buf) \
403 return sprintf(buf, "%u\n", policy->object); \
406 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
407 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
408 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
409 show_one(scaling_min_freq, min);
410 show_one(scaling_max_freq, max);
411 show_one(scaling_cur_freq, cur);
413 static int __cpufreq_set_policy(struct cpufreq_policy *data,
414 struct cpufreq_policy *policy);
417 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
419 #define store_one(file_name, object) \
420 static ssize_t store_##file_name \
421 (struct cpufreq_policy *policy, const char *buf, size_t count) \
424 struct cpufreq_policy new_policy; \
426 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
430 ret = sscanf(buf, "%u", &new_policy.object); \
434 ret = __cpufreq_set_policy(policy, &new_policy); \
435 policy->user_policy.object = policy->object; \
437 return ret ? ret : count; \
440 store_one(scaling_min_freq, min);
441 store_one(scaling_max_freq, max);
444 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
446 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
449 unsigned int cur_freq = __cpufreq_get(policy->cpu);
451 return sprintf(buf, "<unknown>");
452 return sprintf(buf, "%u\n", cur_freq);
457 * show_scaling_governor - show the current policy for the specified CPU
459 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
461 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
462 return sprintf(buf, "powersave\n");
463 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
464 return sprintf(buf, "performance\n");
465 else if (policy->governor)
466 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
467 policy->governor->name);
473 * store_scaling_governor - store policy for the specified CPU
475 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
476 const char *buf, size_t count)
479 char str_governor[16];
480 struct cpufreq_policy new_policy;
482 ret = cpufreq_get_policy(&new_policy, policy->cpu);
486 ret = sscanf(buf, "%15s", str_governor);
490 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
491 &new_policy.governor))
494 /* Do not use cpufreq_set_policy here or the user_policy.max
495 will be wrongly overridden */
496 ret = __cpufreq_set_policy(policy, &new_policy);
498 policy->user_policy.policy = policy->policy;
499 policy->user_policy.governor = policy->governor;
508 * show_scaling_driver - show the cpufreq driver currently loaded
510 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
512 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
516 * show_scaling_available_governors - show the available CPUfreq governors
518 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
522 struct cpufreq_governor *t;
524 if (!cpufreq_driver->target) {
525 i += sprintf(buf, "performance powersave");
529 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
530 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
531 - (CPUFREQ_NAME_LEN + 2)))
533 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
536 i += sprintf(&buf[i], "\n");
540 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
545 for_each_cpu(cpu, mask) {
547 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
548 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
549 if (i >= (PAGE_SIZE - 5))
552 i += sprintf(&buf[i], "\n");
557 * show_related_cpus - show the CPUs affected by each transition even if
558 * hw coordination is in use
560 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
562 return show_cpus(policy->related_cpus, buf);
566 * show_affected_cpus - show the CPUs affected by each transition
568 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
570 return show_cpus(policy->cpus, buf);
573 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
574 const char *buf, size_t count)
576 unsigned int freq = 0;
579 if (!policy->governor || !policy->governor->store_setspeed)
582 ret = sscanf(buf, "%u", &freq);
586 policy->governor->store_setspeed(policy, freq);
591 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
593 if (!policy->governor || !policy->governor->show_setspeed)
594 return sprintf(buf, "<unsupported>\n");
596 return policy->governor->show_setspeed(policy, buf);
600 * show_bios_limit - show the current cpufreq HW/BIOS limitation
602 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
606 if (cpufreq_driver->bios_limit) {
607 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
609 return sprintf(buf, "%u\n", limit);
611 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
614 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
615 cpufreq_freq_attr_ro(cpuinfo_min_freq);
616 cpufreq_freq_attr_ro(cpuinfo_max_freq);
617 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
618 cpufreq_freq_attr_ro(scaling_available_governors);
619 cpufreq_freq_attr_ro(scaling_driver);
620 cpufreq_freq_attr_ro(scaling_cur_freq);
621 cpufreq_freq_attr_ro(bios_limit);
622 cpufreq_freq_attr_ro(related_cpus);
623 cpufreq_freq_attr_ro(affected_cpus);
624 cpufreq_freq_attr_rw(scaling_min_freq);
625 cpufreq_freq_attr_rw(scaling_max_freq);
626 cpufreq_freq_attr_rw(scaling_governor);
627 cpufreq_freq_attr_rw(scaling_setspeed);
629 static struct attribute *default_attrs[] = {
630 &cpuinfo_min_freq.attr,
631 &cpuinfo_max_freq.attr,
632 &cpuinfo_transition_latency.attr,
633 &scaling_min_freq.attr,
634 &scaling_max_freq.attr,
637 &scaling_governor.attr,
638 &scaling_driver.attr,
639 &scaling_available_governors.attr,
640 &scaling_setspeed.attr,
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
650 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
652 struct cpufreq_policy *policy = to_policy(kobj);
653 struct freq_attr *fattr = to_attr(attr);
654 ssize_t ret = -EINVAL;
655 policy = cpufreq_cpu_get_sysfs(policy->cpu);
659 if (lock_policy_rwsem_read(policy->cpu) < 0)
663 ret = fattr->show(policy, buf);
667 unlock_policy_rwsem_read(policy->cpu);
669 cpufreq_cpu_put_sysfs(policy);
674 static ssize_t store(struct kobject *kobj, struct attribute *attr,
675 const char *buf, size_t count)
677 struct cpufreq_policy *policy = to_policy(kobj);
678 struct freq_attr *fattr = to_attr(attr);
679 ssize_t ret = -EINVAL;
680 policy = cpufreq_cpu_get_sysfs(policy->cpu);
684 if (lock_policy_rwsem_write(policy->cpu) < 0)
688 ret = fattr->store(policy, buf, count);
692 unlock_policy_rwsem_write(policy->cpu);
694 cpufreq_cpu_put_sysfs(policy);
699 static void cpufreq_sysfs_release(struct kobject *kobj)
701 struct cpufreq_policy *policy = to_policy(kobj);
702 pr_debug("last reference is dropped\n");
703 complete(&policy->kobj_unregister);
706 static const struct sysfs_ops sysfs_ops = {
711 static struct kobj_type ktype_cpufreq = {
712 .sysfs_ops = &sysfs_ops,
713 .default_attrs = default_attrs,
714 .release = cpufreq_sysfs_release,
717 /* symlink affected CPUs */
718 static int cpufreq_add_dev_symlink(unsigned int cpu,
719 struct cpufreq_policy *policy)
724 for_each_cpu(j, policy->cpus) {
725 struct cpufreq_policy *managed_policy;
726 struct device *cpu_dev;
731 pr_debug("CPU %u already managed, adding link\n", j);
732 managed_policy = cpufreq_cpu_get(cpu);
733 cpu_dev = get_cpu_device(j);
734 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
737 cpufreq_cpu_put(managed_policy);
744 static int cpufreq_add_dev_interface(unsigned int cpu,
745 struct cpufreq_policy *policy,
748 struct cpufreq_policy new_policy;
749 struct freq_attr **drv_attr;
754 /* prepare interface data */
755 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
756 &dev->kobj, "cpufreq");
760 /* set up files for this cpu device */
761 drv_attr = cpufreq_driver->attr;
762 while ((drv_attr) && (*drv_attr)) {
763 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
765 goto err_out_kobj_put;
768 if (cpufreq_driver->get) {
769 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
771 goto err_out_kobj_put;
773 if (cpufreq_driver->target) {
774 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
776 goto err_out_kobj_put;
778 if (cpufreq_driver->bios_limit) {
779 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
781 goto err_out_kobj_put;
784 write_lock_irqsave(&cpufreq_driver_lock, flags);
785 for_each_cpu(j, policy->cpus) {
786 per_cpu(cpufreq_cpu_data, j) = policy;
787 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
789 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
791 ret = cpufreq_add_dev_symlink(cpu, policy);
793 goto err_out_kobj_put;
795 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
796 /* assure that the starting sequence is run in __cpufreq_set_policy */
797 policy->governor = NULL;
799 /* set default policy */
800 ret = __cpufreq_set_policy(policy, &new_policy);
801 policy->user_policy.policy = policy->policy;
802 policy->user_policy.governor = policy->governor;
805 pr_debug("setting policy failed\n");
806 if (cpufreq_driver->exit)
807 cpufreq_driver->exit(policy);
812 kobject_put(&policy->kobj);
813 wait_for_completion(&policy->kobj_unregister);
817 #ifdef CONFIG_HOTPLUG_CPU
818 static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
821 struct cpufreq_policy *policy;
822 int ret = 0, has_target = !!cpufreq_driver->target;
825 policy = cpufreq_cpu_get(sibling);
829 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
831 lock_policy_rwsem_write(sibling);
833 write_lock_irqsave(&cpufreq_driver_lock, flags);
835 cpumask_set_cpu(cpu, policy->cpus);
836 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
837 per_cpu(cpufreq_cpu_data, cpu) = policy;
838 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
840 unlock_policy_rwsem_write(sibling);
843 __cpufreq_governor(policy, CPUFREQ_GOV_START);
844 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
847 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
849 cpufreq_cpu_put(policy);
858 * cpufreq_add_dev - add a CPU device
860 * Adds the cpufreq interface for a CPU device.
862 * The Oracle says: try running cpufreq registration/unregistration concurrently
863 * with with cpu hotplugging and all hell will break loose. Tried to clean this
864 * mess up, but more thorough testing is needed. - Mathieu
866 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
868 unsigned int j, cpu = dev->id;
870 struct cpufreq_policy *policy;
872 #ifdef CONFIG_HOTPLUG_CPU
873 struct cpufreq_governor *gov;
877 if (cpu_is_offline(cpu))
880 pr_debug("adding CPU %u\n", cpu);
883 /* check whether a different CPU already registered this
884 * CPU because it is in the same boat. */
885 policy = cpufreq_cpu_get(cpu);
886 if (unlikely(policy)) {
887 cpufreq_cpu_put(policy);
891 #ifdef CONFIG_HOTPLUG_CPU
892 /* Check if this cpu was hot-unplugged earlier and has siblings */
893 read_lock_irqsave(&cpufreq_driver_lock, flags);
894 for_each_online_cpu(sibling) {
895 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
896 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
897 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
898 return cpufreq_add_policy_cpu(cpu, sibling, dev);
901 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
905 if (!try_module_get(cpufreq_driver->owner)) {
910 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
914 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
915 goto err_free_policy;
917 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
918 goto err_free_cpumask;
921 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
922 cpumask_copy(policy->cpus, cpumask_of(cpu));
924 /* Initially set CPU itself as the policy_cpu */
925 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
927 init_completion(&policy->kobj_unregister);
928 INIT_WORK(&policy->update, handle_update);
930 /* call driver. From then on the cpufreq must be able
931 * to accept all calls to ->verify and ->setpolicy for this CPU
933 ret = cpufreq_driver->init(policy);
935 pr_debug("initialization failed\n");
936 goto err_set_policy_cpu;
939 /* related cpus should atleast have policy->cpus */
940 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
943 * affected cpus must always be the one, which are online. We aren't
944 * managing offline cpus here.
946 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
948 policy->user_policy.min = policy->min;
949 policy->user_policy.max = policy->max;
951 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
952 CPUFREQ_START, policy);
954 #ifdef CONFIG_HOTPLUG_CPU
955 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
957 policy->governor = gov;
958 pr_debug("Restoring governor %s for cpu %d\n",
959 policy->governor->name, cpu);
963 ret = cpufreq_add_dev_interface(cpu, policy, dev);
965 goto err_out_unregister;
967 kobject_uevent(&policy->kobj, KOBJ_ADD);
968 module_put(cpufreq_driver->owner);
969 pr_debug("initialization complete\n");
974 write_lock_irqsave(&cpufreq_driver_lock, flags);
975 for_each_cpu(j, policy->cpus)
976 per_cpu(cpufreq_cpu_data, j) = NULL;
977 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
979 kobject_put(&policy->kobj);
980 wait_for_completion(&policy->kobj_unregister);
983 per_cpu(cpufreq_policy_cpu, cpu) = -1;
984 free_cpumask_var(policy->related_cpus);
986 free_cpumask_var(policy->cpus);
990 module_put(cpufreq_driver->owner);
995 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
999 policy->last_cpu = policy->cpu;
1002 for_each_cpu(j, policy->cpus)
1003 per_cpu(cpufreq_policy_cpu, j) = cpu;
1005 #ifdef CONFIG_CPU_FREQ_TABLE
1006 cpufreq_frequency_table_update_policy_cpu(policy);
1008 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1009 CPUFREQ_UPDATE_POLICY_CPU, policy);
1013 * __cpufreq_remove_dev - remove a CPU device
1015 * Removes the cpufreq interface for a CPU device.
1016 * Caller should already have policy_rwsem in write mode for this CPU.
1017 * This routine frees the rwsem before returning.
1019 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1021 unsigned int cpu = dev->id, ret, cpus;
1022 unsigned long flags;
1023 struct cpufreq_policy *data;
1024 struct kobject *kobj;
1025 struct completion *cmp;
1026 struct device *cpu_dev;
1028 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1030 write_lock_irqsave(&cpufreq_driver_lock, flags);
1032 data = per_cpu(cpufreq_cpu_data, cpu);
1033 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1035 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1038 pr_debug("%s: No cpu_data found\n", __func__);
1042 if (cpufreq_driver->target)
1043 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1045 #ifdef CONFIG_HOTPLUG_CPU
1046 if (!cpufreq_driver->setpolicy)
1047 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1048 data->governor->name, CPUFREQ_NAME_LEN);
1051 WARN_ON(lock_policy_rwsem_write(cpu));
1052 cpus = cpumask_weight(data->cpus);
1055 cpumask_clear_cpu(cpu, data->cpus);
1056 unlock_policy_rwsem_write(cpu);
1058 if (cpu != data->cpu) {
1059 sysfs_remove_link(&dev->kobj, "cpufreq");
1060 } else if (cpus > 1) {
1061 /* first sibling now owns the new sysfs dir */
1062 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1063 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1064 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1066 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1068 WARN_ON(lock_policy_rwsem_write(cpu));
1069 cpumask_set_cpu(cpu, data->cpus);
1071 write_lock_irqsave(&cpufreq_driver_lock, flags);
1072 per_cpu(cpufreq_cpu_data, cpu) = data;
1073 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1075 unlock_policy_rwsem_write(cpu);
1077 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1082 WARN_ON(lock_policy_rwsem_write(cpu));
1083 update_policy_cpu(data, cpu_dev->id);
1084 unlock_policy_rwsem_write(cpu);
1085 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1086 __func__, cpu_dev->id, cpu);
1089 /* If cpu is last user of policy, free policy */
1091 if (cpufreq_driver->target)
1092 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1094 lock_policy_rwsem_read(cpu);
1096 cmp = &data->kobj_unregister;
1097 unlock_policy_rwsem_read(cpu);
1100 /* we need to make sure that the underlying kobj is actually
1101 * not referenced anymore by anybody before we proceed with
1104 pr_debug("waiting for dropping of refcount\n");
1105 wait_for_completion(cmp);
1106 pr_debug("wait complete\n");
1108 if (cpufreq_driver->exit)
1109 cpufreq_driver->exit(data);
1111 free_cpumask_var(data->related_cpus);
1112 free_cpumask_var(data->cpus);
1115 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1116 cpufreq_cpu_put(data);
1117 if (cpufreq_driver->target) {
1118 __cpufreq_governor(data, CPUFREQ_GOV_START);
1119 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1123 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1128 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1130 unsigned int cpu = dev->id;
1133 if (cpu_is_offline(cpu))
1136 retval = __cpufreq_remove_dev(dev, sif);
1141 static void handle_update(struct work_struct *work)
1143 struct cpufreq_policy *policy =
1144 container_of(work, struct cpufreq_policy, update);
1145 unsigned int cpu = policy->cpu;
1146 pr_debug("handle_update for cpu %u called\n", cpu);
1147 cpufreq_update_policy(cpu);
1151 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1153 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1154 * @new_freq: CPU frequency the CPU actually runs at
1156 * We adjust to current frequency first, and need to clean up later.
1157 * So either call to cpufreq_update_policy() or schedule handle_update()).
1159 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1160 unsigned int new_freq)
1162 struct cpufreq_policy *policy;
1163 struct cpufreq_freqs freqs;
1164 unsigned long flags;
1167 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1168 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1170 freqs.old = old_freq;
1171 freqs.new = new_freq;
1173 read_lock_irqsave(&cpufreq_driver_lock, flags);
1174 policy = per_cpu(cpufreq_cpu_data, cpu);
1175 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1177 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1178 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1183 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1186 * This is the last known freq, without actually getting it from the driver.
1187 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1189 unsigned int cpufreq_quick_get(unsigned int cpu)
1191 struct cpufreq_policy *policy;
1192 unsigned int ret_freq = 0;
1194 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1195 return cpufreq_driver->get(cpu);
1197 policy = cpufreq_cpu_get(cpu);
1199 ret_freq = policy->cur;
1200 cpufreq_cpu_put(policy);
1205 EXPORT_SYMBOL(cpufreq_quick_get);
1208 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1211 * Just return the max possible frequency for a given CPU.
1213 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1215 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1216 unsigned int ret_freq = 0;
1219 ret_freq = policy->max;
1220 cpufreq_cpu_put(policy);
1225 EXPORT_SYMBOL(cpufreq_quick_get_max);
1228 static unsigned int __cpufreq_get(unsigned int cpu)
1230 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1231 unsigned int ret_freq = 0;
1233 if (!cpufreq_driver->get)
1236 ret_freq = cpufreq_driver->get(cpu);
1238 if (ret_freq && policy->cur &&
1239 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1240 /* verify no discrepancy between actual and
1241 saved value exists */
1242 if (unlikely(ret_freq != policy->cur)) {
1243 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1244 schedule_work(&policy->update);
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1279 static struct subsys_interface cpufreq_interface = {
1281 .subsys = &cpu_subsys,
1282 .add_dev = cpufreq_add_dev,
1283 .remove_dev = cpufreq_remove_dev,
1288 * cpufreq_suspend() - Suspend CPUFreq governors
1290 * Called during system wide Suspend/Hibernate cycles for suspending governors
1291 * as some platforms can't change frequency after this point in suspend cycle.
1292 * Because some of the devices (like: i2c, regulators, etc) they use for
1293 * changing frequency are suspended quickly after this point.
1295 void cpufreq_suspend(void)
1297 struct cpufreq_policy *policy;
1299 if (!cpufreq_driver)
1305 pr_debug("%s: Suspending Governors\n", __func__);
1307 policy = cpufreq_cpu_get(0);
1309 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1310 pr_err("%s: Failed to stop governor for policy: %p\n",
1312 else if (cpufreq_driver->suspend
1313 && cpufreq_driver->suspend(policy))
1314 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1317 cpufreq_suspended = true;
1321 * cpufreq_resume() - Resume CPUFreq governors
1323 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1324 * are suspended with cpufreq_suspend().
/*
 * cpufreq_resume - counterpart of cpufreq_suspend(): clears the
 * suspended flag, restarts the governor on the CPU0 policy and calls
 * the driver's optional ->resume hook; finally schedules a policy
 * update in case the firmware changed the frequency during suspend.
 * NOTE(review): early-return and unlock lines are elided from this dump.
 */
1326 void cpufreq_resume(void)
1328 struct cpufreq_policy *policy;
1330 if (!cpufreq_driver)
1336 pr_debug("%s: Resuming Governors\n", __func__);
/* must be cleared first so __cpufreq_governor() accepts GOV_START */
1338 cpufreq_suspended = false;
1340 policy = cpufreq_cpu_get(0);
1342 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1343 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1344 pr_err("%s: Failed to start governor for policy: %p\n",
1346 else if (cpufreq_driver->resume
1347 && cpufreq_driver->resume(policy))
1348 pr_err("%s: Failed to resume driver: %p\n", __func__,
/* re-evaluate the policy asynchronously after resume */
1351 schedule_work(&policy->update);
1355 * cpufreq_get_current_driver - return current driver's name
1357 * Return the name string of the currently loaded cpufreq driver
/*
 * cpufreq_get_current_driver - name of the registered cpufreq driver.
 * NOTE(review): the NULL-driver guard lines are elided from this dump;
 * only the dereference of cpufreq_driver->name is visible here.
 */
1360 const char *cpufreq_get_current_driver(void)
1363 return cpufreq_driver->name;
1367 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1369 /*********************************************************************
1370 * NOTIFIER LISTS INTERFACE *
1371 *********************************************************************/
1374 * cpufreq_register_notifier - register a driver with cpufreq
1375 * @nb: notifier function to register
1376 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1378 * Add a driver to one of two lists: either a list of drivers that
1379 * are notified about clock rate changes (once before and once after
1380 * the transition), or a list of drivers that are notified about
1381 * changes in cpufreq policy.
1383 * This function may sleep, and has the same return conditions as
1384 * blocking_notifier_chain_register.
/*
 * cpufreq_register_notifier - add @nb to one of the two notifier
 * chains: the SRCU transition chain (rate-change notifications) or
 * the blocking policy chain (policy-change notifications), selected
 * by @list.  May sleep; returns the chain-register result.
 * NOTE(review): switch/break/default and return lines are elided.
 */
1386 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1390 if (cpufreq_disabled())
/* transition chain must have been initialized before first use */
1393 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1396 case CPUFREQ_TRANSITION_NOTIFIER:
1397 ret = srcu_notifier_chain_register(
1398 &cpufreq_transition_notifier_list, nb);
1400 case CPUFREQ_POLICY_NOTIFIER:
1401 ret = blocking_notifier_chain_register(
1402 &cpufreq_policy_notifier_list, nb);
1410 EXPORT_SYMBOL(cpufreq_register_notifier);
1414 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1415 * @nb: notifier block to be unregistered
1416 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1418 * Remove a driver from the CPU frequency notifier list.
1420 * This function may sleep, and has the same return conditions as
1421 * blocking_notifier_chain_unregister.
/*
 * cpufreq_unregister_notifier - remove @nb from the transition (SRCU)
 * or policy (blocking) notifier chain, selected by @list.  Mirror of
 * cpufreq_register_notifier(); may sleep.
 * NOTE(review): switch/break/default and return lines are elided.
 */
1423 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1427 if (cpufreq_disabled())
1431 case CPUFREQ_TRANSITION_NOTIFIER:
1432 ret = srcu_notifier_chain_unregister(
1433 &cpufreq_transition_notifier_list, nb);
1435 case CPUFREQ_POLICY_NOTIFIER:
1436 ret = blocking_notifier_chain_unregister(
1437 &cpufreq_policy_notifier_list, nb);
1445 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1448 /*********************************************************************
1450 *********************************************************************/
/*
 * __cpufreq_driver_target - ask the driver to switch @policy to
 * @target_freq with @relation (lowest-at-least / highest-at-most).
 * Clamps the request into [policy->min, policy->max], skips the call
 * if already at the requested frequency, and otherwise forwards to
 * the driver's ->target hook.  Caller must hold the policy rwsem
 * (taken by cpufreq_driver_target()).
 * NOTE(review): early-return and final-return lines are elided.
 */
1453 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1454 unsigned int target_freq,
1455 unsigned int relation)
/* -EINVAL is returned unless a ->target hook actually runs */
1457 int retval = -EINVAL;
1458 unsigned int old_target_freq = target_freq;
1460 if (cpufreq_disabled())
1463 /* Make sure that target_freq is within supported range */
1464 if (target_freq > policy->max)
1465 target_freq = policy->max;
1466 if (target_freq < policy->min)
1467 target_freq = policy->min;
1469 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1470 policy->cpu, target_freq, relation, old_target_freq);
/* nothing to do if we're already there (elided early return) */
1472 if (target_freq == policy->cur)
1475 if (cpufreq_driver->target)
1476 retval = cpufreq_driver->target(policy, target_freq, relation);
1480 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * cpufreq_driver_target - locked wrapper around
 * __cpufreq_driver_target(): takes a reference on the policy and the
 * per-CPU write rwsem, performs the frequency change, then releases
 * both.  NOTE(review): error-path and return lines are elided.
 */
1482 int cpufreq_driver_target(struct cpufreq_policy *policy,
1483 unsigned int target_freq,
1484 unsigned int relation)
/* re-acquire a counted reference for the policy's CPU */
1488 policy = cpufreq_cpu_get(policy->cpu);
1492 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1495 ret = __cpufreq_driver_target(policy, target_freq, relation);
1497 unlock_policy_rwsem_write(policy->cpu);
1500 cpufreq_cpu_put(policy);
1504 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * __cpufreq_driver_getavg - query the driver's optional ->getavg hook
 * (average frequency) for @cpu under @policy.  Takes and drops a
 * policy reference around the driver call.
 * NOTE(review): early-return and return lines are elided from view.
 */
1506 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1510 if (cpufreq_disabled())
/* ->getavg is optional; bail out when the driver lacks it */
1513 if (!cpufreq_driver->getavg)
1516 policy = cpufreq_cpu_get(policy->cpu);
1520 ret = cpufreq_driver->getavg(policy, cpu);
1522 cpufreq_cpu_put(policy);
1525 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1528 * when "event" is CPUFREQ_GOV_LIMITS
/*
 * __cpufreq_governor - dispatch a governor event (START/STOP/
 * POLICY_INIT/POLICY_EXIT/LIMITS) to policy->governor->governor().
 * Refuses all events while cpufreq_suspended is set; falls back to
 * the performance governor (when built in) if the chosen governor's
 * max_transition_latency is exceeded by the hardware; tracks
 * governor_enabled under cpufreq_governor_lock to reject redundant
 * START/STOP pairs; and balances the governor module refcount.
 * NOTE(review): several lines (event parameter declaration, returns,
 * braces) are elided from this dump.
 */
1531 static int __cpufreq_governor(struct cpufreq_policy *policy,
1536 /* Only must be defined when default governor is known to have latency
1537 restrictions, like e.g. conservative or ondemand.
1538 That this is the case is already ensured in Kconfig
/* fallback governor used when the requested one can't cope with HW latency */
1540 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1541 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1543 struct cpufreq_governor *gov = NULL;
1546 /* Don't start any governor operations if we are entering suspend */
1547 if (cpufreq_suspended)
/* governor declared a latency limit the hardware cannot meet */
1550 if (policy->governor->max_transition_latency &&
1551 policy->cpuinfo.transition_latency >
1552 policy->governor->max_transition_latency) {
1556 printk(KERN_WARNING "%s governor failed, too long"
1557 " transition latency of HW, fallback"
1558 " to %s governor\n",
1559 policy->governor->name,
1561 policy->governor = gov;
/* pin the governor module for the duration of the event */
1565 if (!try_module_get(policy->governor->owner))
1568 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1569 policy->cpu, event);
1571 mutex_lock(&cpufreq_governor_lock);
/* reject STOP when already stopped, START when already started */
1572 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1573 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1574 mutex_unlock(&cpufreq_governor_lock);
/* optimistically flip the enabled flag before calling the governor */
1578 if (event == CPUFREQ_GOV_STOP)
1579 policy->governor_enabled = false;
1580 else if (event == CPUFREQ_GOV_START)
1581 policy->governor_enabled = true;
1583 mutex_unlock(&cpufreq_governor_lock);
1585 ret = policy->governor->governor(policy, event);
/* on success, keep the per-governor init count in step */
1588 if (event == CPUFREQ_GOV_POLICY_INIT)
1589 policy->governor->initialized++;
1590 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1591 policy->governor->initialized--;
1593 /* Restore original values */
1594 mutex_lock(&cpufreq_governor_lock);
1595 if (event == CPUFREQ_GOV_STOP)
1596 policy->governor_enabled = true;
1597 else if (event == CPUFREQ_GOV_START)
1598 policy->governor_enabled = false;
1599 mutex_unlock(&cpufreq_governor_lock);
1602 /* we keep one module reference alive for
1603 each CPU governed by this CPU */
/* drop the reference taken above unless a successful START keeps it */
1604 if ((event != CPUFREQ_GOV_START) || ret)
1605 module_put(policy->governor->owner);
/* a successful STOP releases the reference START kept */
1606 if ((event == CPUFREQ_GOV_STOP) && !ret)
1607 module_put(policy->governor->owner);
/*
 * cpufreq_register_governor - add @governor to the global governor
 * list, under cpufreq_governor_mutex, unless one with the same name
 * is already registered (checked via __find_governor()).
 * NOTE(review): return lines and the duplicate-name error path are
 * elided from this dump.
 */
1613 int cpufreq_register_governor(struct cpufreq_governor *governor)
1620 if (cpufreq_disabled())
1623 mutex_lock(&cpufreq_governor_mutex);
/* fresh governors start with no policies attached */
1625 governor->initialized = 0;
1627 if (__find_governor(governor->name) == NULL) {
1629 list_add(&governor->governor_list, &cpufreq_governor_list);
1632 mutex_unlock(&cpufreq_governor_mutex);
1635 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * cpufreq_unregister_governor - remove @governor from the global list.
 * On CONFIG_HOTPLUG_CPU it first scrubs the governor's name from the
 * per-CPU saved-governor records of offline CPUs so a later hotplug
 * does not try to restore a governor that no longer exists.
 * NOTE(review): variable declarations and continue/early-return lines
 * are elided from this dump.
 */
1638 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1640 #ifdef CONFIG_HOTPLUG_CPU
1647 if (cpufreq_disabled())
1650 #ifdef CONFIG_HOTPLUG_CPU
/* only offline CPUs keep a saved governor name worth clearing */
1651 for_each_present_cpu(cpu) {
1652 if (cpu_online(cpu))
1654 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1655 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1659 mutex_lock(&cpufreq_governor_mutex);
1660 list_del(&governor->governor_list);
1661 mutex_unlock(&cpufreq_governor_mutex);
1664 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1668 /*********************************************************************
1669 * POLICY INTERFACE *
1670 *********************************************************************/
1673 * cpufreq_get_policy - get the current cpufreq_policy
1674 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1677 * Reads the current cpufreq policy.
/*
 * cpufreq_get_policy - copy the live policy for @cpu into caller-owned
 * @policy.  Takes a policy reference for the duration of the memcpy,
 * then drops it.  NOTE(review): NULL checks and return lines are
 * elided from this dump.
 */
1679 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1681 struct cpufreq_policy *cpu_policy;
1685 cpu_policy = cpufreq_cpu_get(cpu);
1689 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1691 cpufreq_cpu_put(cpu_policy);
1694 EXPORT_SYMBOL(cpufreq_get_policy);
1698 * data : current policy.
1699 * policy : policy to be set.
/*
 * __cpufreq_set_policy - apply the new limits/governor in @policy to
 * the live policy @data.  Sequence visible here: range sanity check,
 * driver ->verify, ADJUST and INCOMPATIBLE policy-notifier rounds, a
 * second ->verify, NOTIFY broadcast, then either the driver's
 * ->setpolicy hook or a governor switch (STOP/EXIT old, INIT/START
 * new, with rollback to the old governor on failure) followed by
 * GOV_LIMITS.  Caller is expected to hold the policy write rwsem --
 * the code below drops and retakes it around POLICY_EXIT calls.
 * NOTE(review): many lines (error gotos, braces, returns) are elided
 * from this dump.
 */
1701 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1702 struct cpufreq_policy *policy)
1704 int ret = 0, failed = 1;
1706 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1707 policy->min, policy->max);
/* the request inherits immutable HW limits from the live policy */
1709 memcpy(&policy->cpuinfo, &data->cpuinfo,
1710 sizeof(struct cpufreq_cpuinfo));
/* reject a requested range disjoint from the current one */
1712 if (policy->min > data->max || policy->max < data->min) {
1717 /* verify the cpu speed can be set within this limit */
1718 ret = cpufreq_driver->verify(policy);
1722 /* adjust if necessary - all reasons */
1723 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1724 CPUFREQ_ADJUST, policy);
1726 /* adjust if necessary - hardware incompatibility*/
1727 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1728 CPUFREQ_INCOMPATIBLE, policy);
1730 /* verify the cpu speed can be set within this limit,
1731 which might be different to the first one */
1732 ret = cpufreq_driver->verify(policy);
1736 /* notification of the new policy */
1737 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1738 CPUFREQ_NOTIFY, policy);
/* commit the (possibly adjusted) limits to the live policy */
1740 data->min = policy->min;
1741 data->max = policy->max;
1743 pr_debug("new min and max freqs are %u - %u kHz\n",
1744 data->min, data->max);
/* setpolicy drivers manage frequency themselves; no governor switch */
1746 if (cpufreq_driver->setpolicy) {
1747 data->policy = policy->policy;
1748 pr_debug("setting range\n");
1749 ret = cpufreq_driver->setpolicy(policy);
1751 if (policy->governor != data->governor) {
1752 /* save old, working values */
1753 struct cpufreq_governor *old_gov = data->governor;
1755 pr_debug("governor switch\n");
1757 /* end old governor */
1758 if (data->governor) {
1759 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
/* rwsem dropped around POLICY_EXIT -- governor exit may take it itself */
1760 unlock_policy_rwsem_write(policy->cpu);
1761 __cpufreq_governor(data,
1762 CPUFREQ_GOV_POLICY_EXIT);
1763 lock_policy_rwsem_write(policy->cpu);
1766 /* start new governor */
1767 data->governor = policy->governor;
1768 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1769 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
/* START failed: tear the new governor back down */
1772 unlock_policy_rwsem_write(policy->cpu);
1773 __cpufreq_governor(data,
1774 CPUFREQ_GOV_POLICY_EXIT);
1775 lock_policy_rwsem_write(policy->cpu);
1780 /* new governor failed, so re-start old one */
1781 pr_debug("starting governor %s failed\n",
1782 data->governor->name);
/* roll back: re-init and restart the previously working governor */
1784 data->governor = old_gov;
1785 __cpufreq_governor(data,
1786 CPUFREQ_GOV_POLICY_INIT);
1787 __cpufreq_governor(data,
1793 /* might be a policy change, too, so fall through */
1795 pr_debug("governor: change or update limits\n");
1796 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1804 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1805 * @cpu: CPU which shall be re-evaluated
1807 * Useful for policy notifiers which have different necessities
1808 * at different times.
/*
 * cpufreq_update_policy - re-evaluate the policy for @cpu against the
 * user_policy limits/governor, resyncing policy->cur with the actual
 * hardware frequency first (the firmware may have changed it behind
 * our back), then applying via __cpufreq_set_policy().  Runs under
 * the per-CPU write rwsem with a policy reference held.
 * NOTE(review): error gotos, braces and return lines are elided.
 */
1810 int cpufreq_update_policy(unsigned int cpu)
1812 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
/* stack copy that receives the user-requested settings */
1813 struct cpufreq_policy policy;
1821 if (unlikely(lock_policy_rwsem_write(cpu))) {
1826 pr_debug("updating policy for CPU %u\n", cpu);
1827 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1828 policy.min = data->user_policy.min;
1829 policy.max = data->user_policy.max;
1830 policy.policy = data->user_policy.policy;
1831 policy.governor = data->user_policy.governor;
1833 /* BIOS might change freq behind our back
1834 -> ask driver for current freq and notify governors about a change */
1835 if (cpufreq_driver->get) {
1836 policy.cur = cpufreq_driver->get(cpu);
1838 pr_debug("Driver did not initialize current freq");
1839 data->cur = policy.cur;
/* only ->target drivers need the out-of-sync notification */
1841 if (data->cur != policy.cur && cpufreq_driver->target)
1842 cpufreq_out_of_sync(cpu, data->cur,
1847 ret = __cpufreq_set_policy(data, &policy);
1849 unlock_policy_rwsem_write(cpu);
1852 cpufreq_cpu_put(data);
1856 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * cpufreq_cpu_callback - CPU hotplug notifier: adds the cpufreq device
 * on CPU online (and on a failed offline), removes it when the CPU is
 * about to go down.  The _FROZEN variants cover suspend/resume cycles.
 * NOTE(review): switch head, break statements and the return line are
 * elided from this dump.
 */
1858 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1859 unsigned long action, void *hcpu)
1861 unsigned int cpu = (unsigned long)hcpu;
1864 dev = get_cpu_device(cpu);
1868 case CPU_ONLINE_FROZEN:
1869 cpufreq_add_dev(dev, NULL);
1871 case CPU_DOWN_PREPARE:
1872 case CPU_DOWN_PREPARE_FROZEN:
1873 __cpufreq_remove_dev(dev, NULL);
/* the offline was aborted: re-register the device */
1875 case CPU_DOWN_FAILED:
1876 case CPU_DOWN_FAILED_FROZEN:
1877 cpufreq_add_dev(dev, NULL);
/* Hotplug notifier block; registered in cpufreq_register_driver(). */
1884 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1885 .notifier_call = cpufreq_cpu_callback,
1888 /*********************************************************************
1889 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1890 *********************************************************************/
1893 * cpufreq_register_driver - register a CPU Frequency driver
1894 * @driver_data: A struct cpufreq_driver containing the values
1895 * submitted by the CPU Frequency driver.
1897 * Registers a CPU Frequency driver to this core code. This code
1898 * returns zero on success, -EBUSY when another driver got here first
1899 * (and isn't unregistered in the meantime).
/*
 * cpufreq_register_driver - install @driver_data as THE cpufreq driver.
 * Validates the required hooks (->verify, ->init, and one of
 * ->setpolicy / ->target), publishes the pointer under
 * cpufreq_driver_lock (-EBUSY if one is already set), registers the
 * subsys interface (which probes the CPUs), and -- unless STICKY --
 * unregisters again if no CPU came up.  Finally hooks CPU hotplug.
 * NOTE(review): several lines (returns, labels, loop braces) are
 * elided from this dump.
 */
1902 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1904 unsigned long flags;
1907 if (cpufreq_disabled())
/* a driver must be able to verify limits, init policies, and scale */
1910 if (!driver_data || !driver_data->verify || !driver_data->init ||
1911 ((!driver_data->setpolicy) && (!driver_data->target)))
1914 pr_debug("trying to register driver %s\n", driver_data->name);
/* setpolicy drivers change freq autonomously -> loops_per_jiffy const */
1916 if (driver_data->setpolicy)
1917 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1919 write_lock_irqsave(&cpufreq_driver_lock, flags);
1920 if (cpufreq_driver) {
1921 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1924 cpufreq_driver = driver_data;
1925 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* triggers cpufreq_add_dev() for each registered CPU device */
1927 ret = subsys_interface_register(&cpufreq_interface);
1929 goto err_null_driver;
1931 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1935 /* check for at least one working CPU */
1936 for (i = 0; i < nr_cpu_ids; i++)
1937 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1942 /* if all ->init() calls failed, unregister */
1944 pr_debug("no CPU initialized for driver %s\n",
1950 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1951 pr_debug("driver %s up and running\n", driver_data->name);
/* error unwind: drop the subsys interface, then clear the pointer */
1955 subsys_interface_unregister(&cpufreq_interface);
1957 write_lock_irqsave(&cpufreq_driver_lock, flags);
1958 cpufreq_driver = NULL;
1959 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1962 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1966 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1968 * Unregister the current CPUFreq driver. Only call this if you have
1969 * the right to do so, i.e. if you have succeeded in initialising before!
1970 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1971 * currently not initialised.
/*
 * cpufreq_unregister_driver - tear down the currently registered
 * driver.  Only succeeds if @driver is the one actually registered;
 * unregisters the subsys interface and hotplug notifier, then clears
 * cpufreq_driver under the write lock.
 * NOTE(review): return lines are elided from this dump.
 */
1973 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1975 unsigned long flags;
1977 if (!cpufreq_driver || (driver != cpufreq_driver))
1980 pr_debug("unregistering driver %s\n", driver->name);
1982 subsys_interface_unregister(&cpufreq_interface);
1983 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1985 write_lock_irqsave(&cpufreq_driver_lock, flags);
1986 cpufreq_driver = NULL;
1987 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1991 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * cpufreq_core_init - core_initcall: mark every possible CPU's policy
 * slot as unassigned (-1), initialize the per-CPU policy rwsems, and
 * create the global /sys/devices/system/cpu/cpufreq kobject.
 * NOTE(review): variable declarations and the return line are elided.
 */
1993 static int __init cpufreq_core_init(void)
1997 if (cpufreq_disabled())
2000 for_each_possible_cpu(cpu) {
/* -1 == no policy owner assigned to this CPU yet */
2001 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2002 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2005 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
/* boot cannot proceed meaningfully without the sysfs anchor */
2006 BUG_ON(!cpufreq_global_kobject);
2010 core_initcall(cpufreq_core_init);