/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

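/*
 * Note (added for exposition, not in the original file): system sleep moves
 * every device through up to four phases in each direction.  On suspend the
 * order is prepare -> suspend -> suspend_late -> suspend_noirq, and devices
 * migrate from dpm_list through dpm_prepared_list, dpm_suspended_list,
 * dpm_late_early_list and dpm_noirq_list as each phase completes.  Resume
 * walks the same phases in reverse (resume_noirq -> resume_early -> resume
 * -> complete), moving devices back toward dpm_list.
 */
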
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

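/*
 * Note (exposition): async_error latches the first error returned by an
 * asynchronously suspended device; dpm_suspend() checks it after
 * async_synchronize_full(), so a failure in an async callback aborts the
 * transition just like a synchronous one.
 */
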
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (pm_print_times_enabled) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

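/*
 * Illustrative sketch (not from the original file): a driver typically
 * populates the dev_pm_ops slots that the pm_*_op() helpers look up.  The
 * names foo_suspend/foo_resume below are hypothetical:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * SET_SYSTEM_SLEEP_PM_OPS() fills .suspend/.resume and the hibernation
 * slots (.freeze, .thaw, .poweroff, .restore) with the same pair, which
 * is exactly what pm_op() dispatches on.
 */
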
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

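/*
 * Note (exposition): every device_*() phase helper below picks exactly one
 * set of callbacks, in the order power domain, then device type, then
 * class, then bus; the driver's own dev_pm_ops is only consulted as a
 * fallback when none of those supplied a callback (or, in the main
 * suspend/resume phase, when the subsystem explicitly defers to it).
 */
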
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

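/*
 * Note (exposition): async suspend/resume is gated twice - globally by
 * pm_async_enabled (the /sys/power/pm_async knob) and per device by the
 * power.async_suspend flag, set via device_enable_async_suspend().  PM
 * tracing forces synchronous operation because it relies on a stable
 * ordering to attribute a resume hang to a particular device.
 */
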
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

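/*
 * Note (exposition): dpm_suspend_end() pairs with dpm_resume_start() above.
 * The platform suspend core brackets the actual system-state switch with
 * these two calls, so the "late" and "noirq" callbacks run closest to the
 * low-level entry into, and exit from, the sleep state.
 */
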
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	if (error)
		pm_runtime_put(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

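/*
 * Note (exposition): the usual system-sleep call sequence, driven from
 * kernel/power, is dpm_suspend_start(), then dpm_suspend_end(), the
 * platform enters the sleep state, then dpm_resume_start() and finally
 * dpm_resume_end() on the way back up.
 */
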
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

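/*
 * Illustrative sketch (not from the original file): a driver with a
 * dependency outside the parent/child tree can order itself after another
 * device; "partner" below is a hypothetical struct device the caller
 * resolved elsewhere:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, partner);
 *		if (error)
 *			return error;
 *		...
 *	}
 *
 * The wait only matters when async suspend/resume is in use; for devices
 * handled synchronously, ordering already follows dpm_list.
 */
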
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);