/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);
struct dpm_drv_wd_data {
	struct device *dev;
	struct task_struct *tsk;
};

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

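/*
 * Illustrative sketch (not part of this file): the dev_pm_ops that pm_op()
 * dispatches to are supplied by a subsystem -- installed at dev->bus->pm,
 * dev->class->pm, dev->type->pm or the device's power domain.  All names
 * below (foo_suspend, foo_resume, foo_bus_pm_ops, foo_bus_type) are
 * hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;	// quiesce the hardware, save context
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;	// restore context, restart the hardware
 *	}
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 *
 *	struct bus_type foo_bus_type = {
 *		.name = "foo",
 *		.pm = &foo_bus_pm_ops,
 *	};
 *
 * Individual drivers usually provide their own dev_pm_ops via
 * device_driver.pm, which the bus's callbacks are expected to forward to.
 */
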
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);
	error = cb(dev);
	suspend_report_result(cb, error);
	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}
	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

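/*
 * Illustrative note: the async path is only taken for devices whose
 * power.async_suspend flag is set.  A driver whose device has no ordering
 * requirements beyond its parent and children can opt in at probe time
 * with device_enable_async_suspend(), which sets that flag; foo_probe()
 * below is hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 *
 * is_async() additionally requires pm_async_enabled (the "pm_async" sysfs
 * knob) and that PM tracing is not active.
 */
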
/**
 * dpm_drv_timeout - Driver suspend / resume watchdog handler
 * @data: struct device which timed out
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so
 * BUG() out for a crash-dump
 */
static void dpm_drv_timeout(unsigned long data)
{
	struct dpm_drv_wd_data *wd_data = (void *)data;
	struct device *dev = wd_data->dev;
	struct task_struct *tsk = wd_data->tsk;

	printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
		(dev->driver ? dev->driver->name : "no driver"));

	printk(KERN_EMERG "dpm suspend stack:\n");
	show_stack(tsk, NULL);

	BUG();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pwr_domain->ops.complete)
			dev->pwr_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

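/*
 * Usage sketch (assumption about the caller, which lives outside this
 * file): the system sleep core is expected to pair these "noirq" calls
 * around the platform's low-level suspend step, e.g.
 *
 *	error = dpm_suspend_noirq(PMSG_SUSPEND);
 *	if (!error) {
 *		// platform enters the sleep state here
 *		dpm_resume_noirq(PMSG_RESUME);
 *	}
 *
 * Note that dpm_suspend_noirq() already unwinds a partial "late suspend"
 * itself, via dpm_resume_noirq(resume_event(state)), when one of the
 * callbacks fails.
 */
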
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);
	error = cb(dev, state);
	suspend_report_result(cb, error);
	initcall_debug_report(dev, calltime, error);

	return error;
}

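/*
 * For reference, the "legacy" callbacks driven by legacy_suspend() and
 * legacy_resume() use the older prototypes taking a pm_message_t (for
 * example dev->bus->suspend or dev->class->suspend).  A hypothetical
 * example of such a callback:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		// state.event identifies the transition (see pm_verb())
 *		return 0;
 *	}
 */
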
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	struct timer_list timer;
	struct dpm_drv_wd_data data;

	dpm_wait_for_children(dev, async);

	data.dev = dev;
	data.tsk = get_current();
	init_timer_on_stack(&timer);
	timer.expires = jiffies + HZ * 12;
	timer.function = dpm_drv_timeout;
	timer.data = (unsigned long)&data;
	add_timer(&timer);

	device_lock(dev);

	if (async_error)
		goto Unlock;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Unlock;
	}

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}
	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}
	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

 Unlock:
	device_unlock(dev);

	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pwr_domain->ops.prepare)
			error = dev->pwr_domain->ops.prepare(dev);
		suspend_report_result(dev->pwr_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

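/*
 * Illustrative sketch of a ->prepare() hook as dispatched above (all names
 * hypothetical, not taken from a real driver).  Returning 0 commits the
 * device to the transition; a nonzero error aborts it, and -EAGAIN is
 * given special, non-fatal treatment by dpm_prepare() below:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		if (foo_transfer_in_progress(dev))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */
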
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
			-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

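/*
 * Usage sketch (assumption about the caller, which lives outside this
 * file): system sleep code typically brackets the whole transition with
 * dpm_suspend_start() and dpm_resume_end():
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		// dpm_suspend_noirq(), platform sleep, dpm_resume_noirq()
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 *
 * Calling dpm_resume_end() after a partial failure is fine, because
 * dpm_resume() and dpm_complete() only act on devices that actually made
 * it onto dpm_suspended_list and dpm_prepared_list.
 */
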
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

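/*
 * Usage sketch (hypothetical driver code, not part of this file): a device
 * with a dependency that is not captured by the parent/child hierarchy can
 * explicitly wait for its supplier, e.g. from an asynchronous resume
 * callback; foo, foo->controller_dev and foo_hw_init() are made up:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		// make sure the controller we depend on has resumed first
 *		device_pm_wait_for_dev(dev, foo->controller_dev);
 *		return foo_hw_init(foo);
 *	}
 */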