/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

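/*
 * "off" is set by disable_cpuidle() or via the module parameter at the end
 * of this file; "initialized" is flipped by the idle handler install/
 * uninstall helpers below; "enabled_devices" counts devices enabled through
 * cpuidle_enable_device() and gates installing the idle handler.
 */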
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

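/*
 * Entry hook used by cpuidle_idle_call(); cpuidle_enable_device() points it
 * at cpuidle_enter_tk() when the driver sets en_core_tk_irqen (the core then
 * handles timekeeping and irq enabling), and at plain cpuidle_enter()
 * otherwise.
 */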
static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Only returns in case of an error
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int i, dead_state = -1;
	int power_usage = INT_MAX;

	/* Find lowest-power state that supports long-term idle */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (s->power_usage < power_usage && s->enter_dead) {
			power_usage = s->power_usage;
			dead_state = i;
		}
	}

	if (dead_state != -1)
		return drv->states[dead_state].enter_dead(dev, dead_state);

	return -ENODEV;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
			(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

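/*
 * Illustrative sketch (hypothetical caller, not taken from an existing
 * driver): reconfiguring a device while idle entry is paused, following the
 * "must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock" rule documented on the enable/disable helpers
 * below.
 */
#if 0
static void example_reconfigure_device(struct cpuidle_device *dev)
{
	cpuidle_pause_and_lock();	/* uninstall idle handler, take cpuidle_lock */
	cpuidle_disable_device(dev);
	/* ... adjust device/state parameters here ... */
	cpuidle_enable_device(dev);
	cpuidle_resume_and_unlock();	/* reinstall idle handler, drop the lock */
}
#endif
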
/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index,
				int (*enter)(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t	t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv = cpuidle_get_driver();

	if (dev->enabled)
		return 0;
	if (!drv || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	cpuidle_enter_ops = drv->en_core_tk_irqen ?
		cpuidle_enter_tk : cpuidle_enter;

	poll_idle_init(drv);

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (dev->enabled == 0)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev;
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return -EINVAL;

	cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

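/*
 * Illustrative sketch (hypothetical driver code, names are made up): a
 * per-CPU cpuidle_device is typically filled in and handed to
 * cpuidle_register_device() once the cpuidle_driver itself has been
 * registered.
 */
#if 0
static DEFINE_PER_CPU(struct cpuidle_device, example_idle_dev);

static int example_setup_cpu(unsigned int cpu)
{
	struct cpuidle_device *dev = &per_cpu(example_idle_dev, cpu);

	dev->cpu = cpu;
	return cpuidle_register_device(dev);
}
#endif
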
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

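/*
 * Built in, this shows up as the "cpuidle.off" parameter (e.g. cpuidle.off=1
 * on the kernel command line) and is read-only under
 * /sys/module/cpuidle/parameters/.
 */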
module_param(off, int, 0444);
core_initcall(cpuidle_init);