/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

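/*
 * A rough sketch of how the system-wide entry points below fit together
 * during one suspend/resume cycle (assuming no failures; error paths
 * unwind through the corresponding resume helpers):
 *
 *	dpm_suspend_start(state);	// dpm_prepare() + dpm_suspend()
 *	dpm_suspend_end(state);		// dpm_suspend_late() + dpm_suspend_noirq()
 *	...				// the system enters the sleep state
 *	dpm_resume_start(state);	// dpm_resume_noirq() + dpm_resume_early()
 *	dpm_resume_end(state);		// dpm_resume() + dpm_complete()
 */
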
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

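/*
 * A condensed sketch of the pattern the dpm_* loops below use to honor
 * that rule: dpm_list_mtx is dropped around every device callback and
 * re-acquired afterwards (error handling omitted here):
 *
 *	mutex_lock(&dpm_list_mtx);
 *	while (!list_empty(&dpm_prepared_list)) {
 *		struct device *dev = to_device(dpm_prepared_list.prev);
 *
 *		get_device(dev);
 *		mutex_unlock(&dpm_list_mtx);
 *		error = device_suspend(dev);	// may take the device lock
 *		mutex_lock(&dpm_list_mtx);
 *		put_device(dev);
 *	}
 *	mutex_unlock(&dpm_list_mtx);
 */
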
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:	return "suspend";
	case PM_EVENT_RESUME:	return "resume";
	case PM_EVENT_FREEZE:	return "freeze";
	case PM_EVENT_QUIESCE:	return "quiesce";
	case PM_EVENT_HIBERNATE: return "hibernate";
	case PM_EVENT_THAW:	return "thaw";
	case PM_EVENT_RESTORE:	return "restore";
	case PM_EVENT_RECOVER:	return "recover";
	default:		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s, cb: %pf\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none", cb);
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

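/*
 * For illustration: a minimal (hypothetical) driver-side dev_pm_ops that
 * pm_op() above would resolve callbacks from.  SET_SYSTEM_SLEEP_PM_OPS()
 * from <linux/pm.h> points .suspend/.freeze/.poweroff at the suspend
 * routine and .resume/.thaw/.restore at the resume routine;
 * foo_suspend/foo_resume are made-up names:
 *
 *	static int foo_suspend(struct device *dev) { ... }
 *	static int foo_resume(struct device *dev) { ... }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */
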
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

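/*
 * Likewise (hypothetical names again), a driver can populate the late and
 * noirq slots that pm_late_early_op() and pm_noirq_op() look up by using
 * the SET_LATE_SYSTEM_SLEEP_PM_OPS() and SET_NOIRQ_SYSTEM_SLEEP_PM_OPS()
 * helpers from <linux/pm.h>:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 */
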
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);
	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);
	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
	      dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

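/*
 * Usage pattern (as in device_resume()/__device_suspend() below): declare
 * the watchdog on the stack, arm it around the driver callback, and clear
 * it afterwards.  With CONFIG_DPM_WATCHDOG unset all of this compiles away:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 */
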
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

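/*
 * A device opts into asynchronous suspend/resume from its probe path; a
 * hedged sketch (foo_probe is a made-up name, the helper comes from
 * <linux/pm.h>):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_enable_async_suspend(dev);	// sets power.async_suspend
 *		...
 *		return 0;
 *	}
 *
 * User space can still veto all async handling via /sys/power/pm_async,
 * which is what pm_async_enabled reflects.
 */
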
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}
	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}
	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}
	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

 Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

 Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}
	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_get_active_wakeup_sources(suspend_abort,
			MAX_SUSPEND_ABORT_LEN);
		log_suspend_abort_reason(suspend_abort);
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}
	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}
	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}
	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

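/*
 * Side note (hedged): device_may_wakeup() above only returns true for a
 * device that a driver has marked wakeup-capable and that user space has
 * left enabled, typically done once at probe time:
 *
 *	device_init_wakeup(dev, true);	// wakeup-capable, enabled by default
 *
 * which is what makes the pm_wakeup_event() abort path relevant here.
 */
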
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}
	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

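/*
 * For illustration, a (hypothetical) driver ->prepare() that opts into the
 * direct_complete path described above whenever the device is already
 * runtime-suspended; pm_runtime_suspended() is the real helper, foo_prepare
 * is a made-up name:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);	// > 0: try direct_complete
 *	}
 */
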
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

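/*
 * Typical (hypothetical) use: a driver whose suspend must not run before
 * another device it depends on has finished suspending/resuming.  foo and
 * priv->companion are made-up names; the helper itself is exported above:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, priv->companion);
 *		if (error)
 *			return error;
 *		...
 *	}
 */
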
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

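/*
 * A minimal (hypothetical) caller, counting the devices on dpm_list:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_dev);
 */
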
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}