2 * drivers/base/power/main.c - Where the driver meets power management.
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
7 * This file is released under the GPLv2
10 * The driver model core calls device_pm_add() when a device is registered.
11 * This will initialize the embedded device_pm_info object in the device
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/mutex.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/resume-trace.h>
26 #include <linux/rwsem.h>
27 #include <linux/interrupt.h>
28 #include <linux/timer.h>
34 * The entries in the dpm_list list are in a depth first order, simply
35 * because children are guaranteed to be discovered after parents, and
36 * are inserted at the back of the list on discovery.
38 * Since device_pm_add() may be called with a device semaphore held,
39 * we must never try to acquire a device semaphore while holding
/* Serializes all access to dpm_list and to transition_started. */
45 static DEFINE_MUTEX(dpm_list_mtx);
/* Watchdog that fires if a driver's suspend/resume callback hangs. */
47 static void dpm_drv_timeout(unsigned long data);
48 static DEFINE_TIMER(dpm_drv_wd, dpm_drv_timeout, 0, 0);
/*
 * NOTE(review): member of the watchdog-data struct; the struct header and
 * its "struct device *dev;" member are elided in this dump — confirm
 * against the full file.
 */
51 struct task_struct *tsk;
55 * Set once the preparation of devices for a PM transition has started, reset
56 * before starting to resume devices. Protected by dpm_list_mtx.
58 static bool transition_started;
61 * device_pm_init - Initialize the PM-related part of a device object.
62 * @dev: Device object being initialized.
64 void device_pm_init(struct device *dev)
66 dev->power.status = DPM_ON;
71 * device_pm_lock - Lock the list of active devices used by the PM core.
73 void device_pm_lock(void)
75 mutex_lock(&dpm_list_mtx);
79 * device_pm_unlock - Unlock the list of active devices used by the PM core.
81 void device_pm_unlock(void)
83 mutex_unlock(&dpm_list_mtx);
87 * device_pm_add - Add a device to the PM core's list of active devices.
88 * @dev: Device to add to the list.
90 void device_pm_add(struct device *dev)
92 pr_debug("PM: Adding info for %s:%s\n",
93 dev->bus ? dev->bus->name : "No Bus",
94 kobject_name(&dev->kobj));
95 mutex_lock(&dpm_list_mtx);
97 if (dev->parent->power.status >= DPM_SUSPENDING)
98 dev_warn(dev, "parent %s should not be sleeping\n",
99 dev_name(dev->parent));
100 } else if (transition_started) {
102 * We refuse to register parentless devices while a PM
103 * transition is in progress in order to avoid leaving them
104 * unhandled down the road
106 dev_WARN(dev, "Parentless device registered during a PM transaction\n");
109 list_add_tail(&dev->power.entry, &dpm_list);
110 mutex_unlock(&dpm_list_mtx);
114 * device_pm_remove - Remove a device from the PM core's list of active devices.
115 * @dev: Device to be removed from the list.
117 void device_pm_remove(struct device *dev)
119 pr_debug("PM: Removing info for %s:%s\n",
120 dev->bus ? dev->bus->name : "No Bus",
121 kobject_name(&dev->kobj));
122 mutex_lock(&dpm_list_mtx);
123 list_del_init(&dev->power.entry);
124 mutex_unlock(&dpm_list_mtx);
125 pm_runtime_remove(dev);
129 * device_pm_move_before - Move device in the PM core's list of active devices.
130 * @deva: Device to move in dpm_list.
131 * @devb: Device @deva should come before.
133 void device_pm_move_before(struct device *deva, struct device *devb)
135 pr_debug("PM: Moving %s:%s before %s:%s\n",
136 deva->bus ? deva->bus->name : "No Bus",
137 kobject_name(&deva->kobj),
138 devb->bus ? devb->bus->name : "No Bus",
139 kobject_name(&devb->kobj));
140 /* Delete deva from dpm_list and reinsert before devb. */
141 list_move_tail(&deva->power.entry, &devb->power.entry);
145 * device_pm_move_after - Move device in the PM core's list of active devices.
146 * @deva: Device to move in dpm_list.
147 * @devb: Device @deva should come after.
149 void device_pm_move_after(struct device *deva, struct device *devb)
151 pr_debug("PM: Moving %s:%s after %s:%s\n",
152 deva->bus ? deva->bus->name : "No Bus",
153 kobject_name(&deva->kobj),
154 devb->bus ? devb->bus->name : "No Bus",
155 kobject_name(&devb->kobj));
156 /* Delete deva from dpm_list and reinsert after devb. */
157 list_move(&deva->power.entry, &devb->power.entry);
161 * device_pm_move_last - Move device to end of the PM core's list of devices.
162 * @dev: Device to move in dpm_list.
164 void device_pm_move_last(struct device *dev)
166 pr_debug("PM: Moving %s:%s to end of list\n",
167 dev->bus ? dev->bus->name : "No Bus",
168 kobject_name(&dev->kobj));
169 list_move_tail(&dev->power.entry, &dpm_list);
/*
 * NOTE(review): this is a sampled, line-numbered dump; braces, blank lines
 * and the per-callback NULL guards (upstream wraps each call in
 * "if (ops-><cb>) { ... }") are elided here — restore from the full file
 * before compiling.
 */
173 * pm_op - Execute the PM operation appropriate for given PM event.
174 * @dev: Device to handle.
175 * @ops: PM operations to choose from.
176 * @state: PM transition of the system being carried out.
178 static int pm_op(struct device *dev,
179 const struct dev_pm_ops *ops,
/* Dispatch on the transition type; each arm invokes one dev_pm_ops hook. */
184 switch (state.event) {
185 #ifdef CONFIG_SUSPEND
186 case PM_EVENT_SUSPEND:
188 error = ops->suspend(dev);
189 suspend_report_result(ops->suspend, error);
192 case PM_EVENT_RESUME:
194 error = ops->resume(dev);
195 suspend_report_result(ops->resume, error);
198 #endif /* CONFIG_SUSPEND */
199 #ifdef CONFIG_HIBERNATION
/* FREEZE and QUIESCE share the ->freeze() path. */
200 case PM_EVENT_FREEZE:
201 case PM_EVENT_QUIESCE:
203 error = ops->freeze(dev);
204 suspend_report_result(ops->freeze, error);
207 case PM_EVENT_HIBERNATE:
209 error = ops->poweroff(dev);
210 suspend_report_result(ops->poweroff, error);
/* NOTE(review): upstream groups PM_EVENT_THAW with RECOVER — elided here. */
214 case PM_EVENT_RECOVER:
216 error = ops->thaw(dev);
217 suspend_report_result(ops->thaw, error);
220 case PM_EVENT_RESTORE:
222 error = ops->restore(dev);
223 suspend_report_result(ops->restore, error);
226 #endif /* CONFIG_HIBERNATION */
/*
 * NOTE(review): sampled dump — closing braces and break statements between
 * the case arms are elided; restore from the full file before compiling.
 */
234 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
235 * @dev: Device to handle.
236 * @ops: PM operations to choose from.
237 * @state: PM transition of the system being carried out.
239 * The driver of @dev will not receive interrupts while this function is being
242 static int pm_noirq_op(struct device *dev,
243 const struct dev_pm_ops *ops,
/* Same dispatch as pm_op(), but using the *_noirq callback variants. */
248 switch (state.event) {
249 #ifdef CONFIG_SUSPEND
250 case PM_EVENT_SUSPEND:
251 if (ops->suspend_noirq) {
252 error = ops->suspend_noirq(dev);
253 suspend_report_result(ops->suspend_noirq, error);
256 case PM_EVENT_RESUME:
257 if (ops->resume_noirq) {
258 error = ops->resume_noirq(dev);
259 suspend_report_result(ops->resume_noirq, error);
262 #endif /* CONFIG_SUSPEND */
263 #ifdef CONFIG_HIBERNATION
/* FREEZE and QUIESCE share the ->freeze_noirq() path. */
264 case PM_EVENT_FREEZE:
265 case PM_EVENT_QUIESCE:
266 if (ops->freeze_noirq) {
267 error = ops->freeze_noirq(dev);
268 suspend_report_result(ops->freeze_noirq, error);
271 case PM_EVENT_HIBERNATE:
272 if (ops->poweroff_noirq) {
273 error = ops->poweroff_noirq(dev);
274 suspend_report_result(ops->poweroff_noirq, error);
/* NOTE(review): upstream groups PM_EVENT_THAW with RECOVER — elided here. */
278 case PM_EVENT_RECOVER:
279 if (ops->thaw_noirq) {
280 error = ops->thaw_noirq(dev);
281 suspend_report_result(ops->thaw_noirq, error);
284 case PM_EVENT_RESTORE:
285 if (ops->restore_noirq) {
286 error = ops->restore_noirq(dev);
287 suspend_report_result(ops->restore_noirq, error);
290 #endif /* CONFIG_HIBERNATION */
297 static char *pm_verb(int event)
300 case PM_EVENT_SUSPEND:
302 case PM_EVENT_RESUME:
304 case PM_EVENT_FREEZE:
306 case PM_EVENT_QUIESCE:
308 case PM_EVENT_HIBERNATE:
312 case PM_EVENT_RESTORE:
314 case PM_EVENT_RECOVER:
317 return "(unknown PM event)";
321 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
323 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
324 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
325 ", may wakeup" : "");
328 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
331 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
332 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
335 /*------------------------- Resume routines -------------------------*/
338 * device_resume_noirq - Execute an "early resume" callback for given device.
339 * @dev: Device to handle.
340 * @state: PM transition of the system being carried out.
342 * The driver of @dev will not receive interrupts while this function is being
345 static int device_resume_noirq(struct device *dev, pm_message_t state)
356 pm_dev_dbg(dev, state, "EARLY ");
357 error = pm_noirq_op(dev, dev->bus->pm, state);
365 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
366 * @state: PM transition of the system being carried out.
368 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
369 * enable device drivers to receive interrupts.
371 void dpm_resume_noirq(pm_message_t state)
375 mutex_lock(&dpm_list_mtx);
376 transition_started = false;
377 list_for_each_entry(dev, &dpm_list, power.entry)
378 if (dev->power.status > DPM_OFF) {
381 dev->power.status = DPM_OFF;
382 error = device_resume_noirq(dev, state);
384 pm_dev_err(dev, state, " early", error);
386 mutex_unlock(&dpm_list_mtx);
387 resume_device_irqs();
389 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/*
 * NOTE(review): sampled dump — the interior is heavily elided (device lock
 * acquisition, the "if (dev->bus)" / "if (dev->type)" / "if (dev->class)"
 * guards, goto-End error short-circuits, and the unlock/return tail).
 * Restore from the full file before compiling.
 */
392 * device_resume - Execute "resume" callbacks for given device.
393 * @dev: Device to handle.
394 * @state: PM transition of the system being carried out.
396 static int device_resume(struct device *dev, pm_message_t state)
/* Bus-level resume: prefer struct dev_pm_ops over the legacy ->resume(). */
407 pm_dev_dbg(dev, state, "");
408 error = pm_op(dev, dev->bus->pm, state);
409 } else if (dev->bus->resume) {
410 pm_dev_dbg(dev, state, "legacy ");
411 error = dev->bus->resume(dev);
/* Device-type resume (only the dev_pm_ops form exists for types). */
419 pm_dev_dbg(dev, state, "type ");
420 error = pm_op(dev, dev->type->pm, state);
/* Class-level resume: again dev_pm_ops first, legacy ->resume() fallback. */
427 if (dev->class->pm) {
428 pm_dev_dbg(dev, state, "class ");
429 error = pm_op(dev, dev->class->pm, state);
430 } else if (dev->class->resume) {
431 pm_dev_dbg(dev, state, "legacy class ");
432 error = dev->class->resume(dev);
/*
 * NOTE(review): sampled dump — the CONFIG_ARCH_RK29 body (and its #endif)
 * plus the final BUG() are elided; restore from the full file.  Also note
 * the mid-function #include is vendor-patch style, not upstream practice.
 */
443 * dpm_drv_timeout - Driver suspend / resume watchdog handler
444 * @data: struct device which timed out
446 * Called when a driver has timed out suspending or resuming.
447 * There's not much we can do here to recover so
448 * BUG() out for a crash-dump
451 static void dpm_drv_timeout(unsigned long data)
/* Read the device/task recorded by dpm_drv_wdset() before the timer armed. */
453 struct device *dev = dpm_drv_wd_data.dev;
454 struct task_struct *tsk = dpm_drv_wd_data.tsk;
456 #ifdef CONFIG_ARCH_RK29
457 #include <linux/console.h>
461 printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
462 (dev->driver ? dev->driver->name : "no driver"));
/* Dump the stack of the task that was driving the suspend/resume. */
464 printk(KERN_EMERG "dpm suspend stack:\n");
465 show_stack(tsk, NULL);
471 * dpm_drv_wdset - Sets up driver suspend/resume watchdog timer.
472 * @dev: struct device which we're guarding.
475 static void dpm_drv_wdset(struct device *dev)
477 dpm_drv_wd_data.dev = dev;
478 dpm_drv_wd_data.tsk = get_current();
479 dpm_drv_wd.data = (unsigned long) &dpm_drv_wd_data;
480 mod_timer(&dpm_drv_wd, jiffies + (HZ * 3));
484 * dpm_drv_wdclr - clears driver suspend/resume watchdog timer.
485 * @dev: struct device which we're no longer guarding.
488 static void dpm_drv_wdclr(struct device *dev)
490 del_timer_sync(&dpm_drv_wd);
/*
 * NOTE(review): sampled dump — list manipulation before device_resume(),
 * error-check braces and the get_device()/put_device() pairing are elided;
 * restore from the full file before compiling.
 */
494 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
495 * @state: PM transition of the system being carried out.
497 * Execute the appropriate "resume" callback for all devices whose status
498 * indicates that they are suspended.
500 static void dpm_resume(pm_message_t state)
/* Devices are moved to a private list as they are handled, then spliced
 * back, so concurrent device_pm_add() calls are tolerated. */
502 struct list_head list;
504 INIT_LIST_HEAD(&list);
505 mutex_lock(&dpm_list_mtx);
506 while (!list_empty(&dpm_list)) {
507 struct device *dev = to_device(dpm_list.next);
510 if (dev->power.status >= DPM_OFF) {
513 dev->power.status = DPM_RESUMING;
/* Drop the list lock while the driver callback runs (it may sleep). */
514 mutex_unlock(&dpm_list_mtx);
516 error = device_resume(dev, state);
518 mutex_lock(&dpm_list_mtx);
520 pm_dev_err(dev, state, "", error);
521 } else if (dev->power.status == DPM_SUSPENDING) {
522 /* Allow new children of the device to be registered */
523 dev->power.status = DPM_RESUMING;
525 if (!list_empty(&dev->power.entry))
526 list_move_tail(&dev->power.entry, &list);
529 list_splice(&list, &dpm_list);
530 mutex_unlock(&dpm_list_mtx);
/*
 * NOTE(review): sampled dump — braces/blank lines are elided, and upstream
 * brackets this body with the per-device lock (down/up on dev->sem); confirm
 * against the full file.
 */
534 * device_complete - Complete a PM transition for given device.
535 * @dev: Device to handle.
536 * @state: PM transition of the system being carried out.
538 static void device_complete(struct device *dev, pm_message_t state)
/* ->complete() runs in reverse of ->prepare() order: class, type, bus. */
542 if (dev->class && dev->class->pm && dev->class->pm->complete) {
543 pm_dev_dbg(dev, state, "completing class ");
544 dev->class->pm->complete(dev);
547 if (dev->type && dev->type->pm && dev->type->pm->complete) {
548 pm_dev_dbg(dev, state, "completing type ");
549 dev->type->pm->complete(dev);
552 if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
553 pm_dev_dbg(dev, state, "completing ");
554 dev->bus->pm->complete(dev);
/*
 * NOTE(review): sampled dump — braces and list moves around the status
 * check are elided; restore from the full file before compiling.
 */
561 * dpm_complete - Complete a PM transition for all non-sysdev devices.
562 * @state: PM transition of the system being carried out.
564 * Execute the ->complete() callbacks for all devices whose PM status is not
565 * DPM_ON (this allows new devices to be registered).
567 static void dpm_complete(pm_message_t state)
/* Walks dpm_list from the tail (children before parents), moving handled
 * devices onto a private list that is spliced back at the end. */
569 struct list_head list;
571 INIT_LIST_HEAD(&list);
572 mutex_lock(&dpm_list_mtx);
573 transition_started = false;
574 while (!list_empty(&dpm_list)) {
575 struct device *dev = to_device(dpm_list.prev);
578 if (dev->power.status > DPM_ON) {
579 dev->power.status = DPM_ON;
/* Drop the lock while the driver's ->complete() runs (it may sleep). */
580 mutex_unlock(&dpm_list_mtx);
582 device_complete(dev, state);
/* Balances the pm_runtime_get_noresume() taken in dpm_prepare(). */
583 pm_runtime_put_noidle(dev);
585 mutex_lock(&dpm_list_mtx);
587 if (!list_empty(&dev->power.entry))
588 list_move(&dev->power.entry, &list);
591 list_splice(&list, &dpm_list);
592 mutex_unlock(&dpm_list_mtx);
596 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
597 * @state: PM transition of the system being carried out.
599 * Execute "resume" callbacks for all devices and complete the PM transition of
602 void dpm_resume_end(pm_message_t state)
608 EXPORT_SYMBOL_GPL(dpm_resume_end);
611 /*------------------------- Suspend routines -------------------------*/
614 * resume_event - Return a "resume" message for given "suspend" sleep state.
615 * @sleep_state: PM message representing a sleep state.
617 * Return a PM message representing the resume event corresponding to given
620 static pm_message_t resume_event(pm_message_t sleep_state)
622 switch (sleep_state.event) {
623 case PM_EVENT_SUSPEND:
625 case PM_EVENT_FREEZE:
626 case PM_EVENT_QUIESCE:
628 case PM_EVENT_HIBERNATE:
635 * device_suspend_noirq - Execute a "late suspend" callback for given device.
636 * @dev: Device to handle.
637 * @state: PM transition of the system being carried out.
639 * The driver of @dev will not receive interrupts while this function is being
642 static int device_suspend_noirq(struct device *dev, pm_message_t state)
650 pm_dev_dbg(dev, state, "LATE ");
651 error = pm_noirq_op(dev, dev->bus->pm, state);
657 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
658 * @state: PM transition of the system being carried out.
660 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
661 * handlers for all non-sysdev devices.
663 int dpm_suspend_noirq(pm_message_t state)
668 suspend_device_irqs();
669 mutex_lock(&dpm_list_mtx);
670 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
671 error = device_suspend_noirq(dev, state);
673 pm_dev_err(dev, state, " late", error);
676 dev->power.status = DPM_OFF_IRQ;
678 mutex_unlock(&dpm_list_mtx);
680 dpm_resume_noirq(resume_event(state));
683 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/*
 * NOTE(review): sampled dump — the per-device lock, the "if (dev->class)" /
 * "if (dev->type)" / "if (dev->bus)" guards, goto-End short-circuits and
 * the return tail are elided; restore from the full file before compiling.
 */
686 * device_suspend - Execute "suspend" callbacks for given device.
687 * @dev: Device to handle.
688 * @state: PM transition of the system being carried out.
690 static int device_suspend(struct device *dev, pm_message_t state)
/* Suspend runs in reverse of resume order: class, then type, then bus. */
697 if (dev->class->pm) {
698 pm_dev_dbg(dev, state, "class ");
699 error = pm_op(dev, dev->class->pm, state);
700 } else if (dev->class->suspend) {
/* Legacy class callback takes the pm_message_t directly. */
701 pm_dev_dbg(dev, state, "legacy class ");
702 error = dev->class->suspend(dev, state);
703 suspend_report_result(dev->class->suspend, error);
711 pm_dev_dbg(dev, state, "type ");
712 error = pm_op(dev, dev->type->pm, state);
720 pm_dev_dbg(dev, state, "");
721 error = pm_op(dev, dev->bus->pm, state);
722 } else if (dev->bus->suspend) {
723 pm_dev_dbg(dev, state, "legacy ");
724 error = dev->bus->suspend(dev, state);
725 suspend_report_result(dev->bus->suspend, error);
/*
 * NOTE(review): sampled dump — elided lines around device_suspend() likely
 * include get_device()/put_device() and the dpm_drv_wdset()/dpm_drv_wdclr()
 * watchdog arming from this tree's Android patch, plus the error-break path;
 * restore from the full file before compiling.
 */
735 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
736 * @state: PM transition of the system being carried out.
738 static int dpm_suspend(pm_message_t state)
/* Suspended devices accumulate on a private list spliced back at the end,
 * preserving dpm_list order while the lock is dropped for callbacks. */
740 struct list_head list;
743 INIT_LIST_HEAD(&list);
744 mutex_lock(&dpm_list_mtx);
/* Walk from the tail so children are suspended before their parents. */
745 while (!list_empty(&dpm_list)) {
746 struct device *dev = to_device(dpm_list.prev);
749 mutex_unlock(&dpm_list_mtx);
752 error = device_suspend(dev, state);
755 mutex_lock(&dpm_list_mtx);
757 pm_dev_err(dev, state, "", error);
761 dev->power.status = DPM_OFF;
762 if (!list_empty(&dev->power.entry))
763 list_move(&dev->power.entry, &list);
766 list_splice(&list, dpm_list.prev);
767 mutex_unlock(&dpm_list_mtx);
/*
 * NOTE(review): sampled dump — upstream brackets this body with the
 * per-device lock (down/up on dev->sem) and short-circuits to an End label
 * after each failing ->prepare(); those lines are elided here.
 */
772 * device_prepare - Prepare a device for system power transition.
773 * @dev: Device to handle.
774 * @state: PM transition of the system being carried out.
776 * Execute the ->prepare() callback(s) for given device. No new children of the
777 * device may be registered after this function has returned.
779 static int device_prepare(struct device *dev, pm_message_t state)
/* ->prepare() runs bus first, then type, then class (reverse of complete). */
785 if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
786 pm_dev_dbg(dev, state, "preparing ");
787 error = dev->bus->pm->prepare(dev);
788 suspend_report_result(dev->bus->pm->prepare, error);
793 if (dev->type && dev->type->pm && dev->type->pm->prepare) {
794 pm_dev_dbg(dev, state, "preparing type ");
795 error = dev->type->pm->prepare(dev);
796 suspend_report_result(dev->type->pm->prepare, error);
801 if (dev->class && dev->class->pm && dev->class->pm->prepare) {
802 pm_dev_dbg(dev, state, "preparing class ");
803 error = dev->class->pm->prepare(dev);
804 suspend_report_result(dev->class->pm->prepare, error);
/*
 * NOTE(review): sampled dump — braces, the error-handling else-branches
 * (the -EAGAIN retry path's body) and the final return are elided; restore
 * from the full file before compiling.
 */
813 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
814 * @state: PM transition of the system being carried out.
816 * Execute the ->prepare() callback(s) for all devices.
818 static int dpm_prepare(pm_message_t state)
/* Prepared devices move to a private list, spliced back when done. */
820 struct list_head list;
823 INIT_LIST_HEAD(&list);
824 mutex_lock(&dpm_list_mtx);
/* From here on device_pm_add() warns about parentless registrations. */
825 transition_started = true;
826 while (!list_empty(&dpm_list)) {
827 struct device *dev = to_device(dpm_list.next);
830 dev->power.status = DPM_PREPARING;
831 mutex_unlock(&dpm_list_mtx);
/* Pin runtime PM so the device cannot runtime-suspend mid-transition;
 * released by pm_runtime_put_noidle() in dpm_complete(). */
833 pm_runtime_get_noresume(dev);
834 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
835 /* Wake-up requested during system sleep transition. */
836 pm_runtime_put_noidle(dev);
839 error = device_prepare(dev, state);
842 mutex_lock(&dpm_list_mtx);
844 dev->power.status = DPM_ON;
/* -EAGAIN (e.g. pending wakeup) skips this device without aborting. */
845 if (error == -EAGAIN) {
850 printk(KERN_ERR "PM: Failed to prepare device %s "
851 "for power transition: error %d\n",
852 kobject_name(&dev->kobj), error);
856 dev->power.status = DPM_SUSPENDING;
857 if (!list_empty(&dev->power.entry))
858 list_move_tail(&dev->power.entry, &list);
861 list_splice(&list, &dpm_list);
862 mutex_unlock(&dpm_list_mtx);
867 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
868 * @state: PM transition of the system being carried out.
870 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
871 * callbacks for them.
873 int dpm_suspend_start(pm_message_t state)
878 error = dpm_prepare(state);
880 error = dpm_suspend(state);
883 EXPORT_SYMBOL_GPL(dpm_suspend_start);
885 void __suspend_report_result(const char *function, void *fn, int ret)
888 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
890 EXPORT_SYMBOL_GPL(__suspend_report_result);