Revert "drivers: power: use 'current' instead of 'get_current()'"
[firefly-linux-kernel-4.4.55.git] / drivers / base / power / main.c
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36 #include <linux/wakeup_reason.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in dpm_list are in depth-first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
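/*
 * First error reported by an asynchronous suspend/resume thread; the
 * synchronous code paths check it and abort the rest of the transition.
 */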
63 static int async_error;
64
65 static char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
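        /*
         * Initialize the completion in the "done" state so that dpm_wait()
         * never blocks on a device that has not been suspended yet.
         */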
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         pr_debug("PM: Adding info for %s:%s\n",
128                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
129         mutex_lock(&dpm_list_mtx);
130         if (dev->parent && dev->parent->power.is_prepared)
131                 dev_warn(dev, "parent %s should not be sleeping\n",
132                         dev_name(dev->parent));
133         list_add_tail(&dev->power.entry, &dpm_list);
134         mutex_unlock(&dpm_list_mtx);
135 }
136
137 /**
138  * device_pm_remove - Remove a device from the PM core's list of active devices.
139  * @dev: Device to be removed from the list.
140  */
141 void device_pm_remove(struct device *dev)
142 {
143         pr_debug("PM: Removing info for %s:%s\n",
144                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
145         complete_all(&dev->power.completion);
146         mutex_lock(&dpm_list_mtx);
147         list_del_init(&dev->power.entry);
148         mutex_unlock(&dpm_list_mtx);
149         device_wakeup_disable(dev);
150         pm_runtime_remove(dev);
151 }
152
153 /**
154  * device_pm_move_before - Move device in the PM core's list of active devices.
155  * @deva: Device to move in dpm_list.
156  * @devb: Device @deva should come before.
157  */
158 void device_pm_move_before(struct device *deva, struct device *devb)
159 {
160         pr_debug("PM: Moving %s:%s before %s:%s\n",
161                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
162                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
163         /* Delete deva from dpm_list and reinsert before devb. */
164         list_move_tail(&deva->power.entry, &devb->power.entry);
165 }
166
167 /**
168  * device_pm_move_after - Move device in the PM core's list of active devices.
169  * @deva: Device to move in dpm_list.
170  * @devb: Device @deva should come after.
171  */
172 void device_pm_move_after(struct device *deva, struct device *devb)
173 {
174         pr_debug("PM: Moving %s:%s after %s:%s\n",
175                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
176                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
177         /* Delete deva from dpm_list and reinsert after devb. */
178         list_move(&deva->power.entry, &devb->power.entry);
179 }
180
181 /**
182  * device_pm_move_last - Move device to end of the PM core's list of devices.
183  * @dev: Device to move in dpm_list.
184  */
185 void device_pm_move_last(struct device *dev)
186 {
187         pr_debug("PM: Moving %s:%s to end of list\n",
188                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
189         list_move_tail(&dev->power.entry, &dpm_list);
190 }
191
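/* Record when a PM callback starts; returns 0 if timing output is disabled. */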
192 static ktime_t initcall_debug_start(struct device *dev)
193 {
194         ktime_t calltime = ktime_set(0, 0);
195
196         if (pm_print_times_enabled) {
197                 pr_info("calling  %s+ @ %i, parent: %s\n",
198                         dev_name(dev), task_pid_nr(current),
199                         dev->parent ? dev_name(dev->parent) : "none");
200                 calltime = ktime_get();
201         }
202
203         return calltime;
204 }
205
206 static void initcall_debug_report(struct device *dev, ktime_t calltime,
207                                   int error, pm_message_t state, char *info)
208 {
209         ktime_t rettime;
210         s64 nsecs;
211
212         rettime = ktime_get();
213         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
214
215         if (pm_print_times_enabled) {
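                /* nsecs >> 10 cheaply approximates a division by NSEC_PER_USEC (1000). */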
216                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
217                         error, (unsigned long long)nsecs >> 10);
218         }
219 }
220
221 /**
222  * dpm_wait - Wait for a PM operation to complete.
223  * @dev: Device to wait for.
224  * @async: If unset, wait only if the device's power.async_suspend flag is set.
225  */
226 static void dpm_wait(struct device *dev, bool async)
227 {
228         if (!dev)
229                 return;
230
231         if (async || (pm_async_enabled && dev->power.async_suspend))
232                 wait_for_completion(&dev->power.completion);
233 }
234
235 static int dpm_wait_fn(struct device *dev, void *async_ptr)
236 {
237         dpm_wait(dev, *((bool *)async_ptr));
238         return 0;
239 }
240
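/*
 * Wait for every child of @dev to finish its PM transition; on suspend,
 * children must be handled before their parent.
 */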
241 static void dpm_wait_for_children(struct device *dev, bool async)
242 {
243        device_for_each_child(dev, &async, dpm_wait_fn);
244 }
245
246 /**
247  * pm_op - Return the PM operation appropriate for given PM event.
248  * @ops: PM operations to choose from.
249  * @state: PM transition of the system being carried out.
250  */
251 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
252 {
253         switch (state.event) {
254 #ifdef CONFIG_SUSPEND
255         case PM_EVENT_SUSPEND:
256                 return ops->suspend;
257         case PM_EVENT_RESUME:
258                 return ops->resume;
259 #endif /* CONFIG_SUSPEND */
260 #ifdef CONFIG_HIBERNATE_CALLBACKS
261         case PM_EVENT_FREEZE:
262         case PM_EVENT_QUIESCE:
263                 return ops->freeze;
264         case PM_EVENT_HIBERNATE:
265                 return ops->poweroff;
266         case PM_EVENT_THAW:
267         case PM_EVENT_RECOVER:
268                 return ops->thaw;
270         case PM_EVENT_RESTORE:
271                 return ops->restore;
272 #endif /* CONFIG_HIBERNATE_CALLBACKS */
273         }
274
275         return NULL;
276 }
277
278 /**
279  * pm_late_early_op - Return the PM operation appropriate for given PM event.
280  * @ops: PM operations to choose from.
281  * @state: PM transition of the system being carried out.
282  *
283  * Runtime PM is disabled for @dev while this function is being executed.
284  */
285 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
286                                       pm_message_t state)
287 {
288         switch (state.event) {
289 #ifdef CONFIG_SUSPEND
290         case PM_EVENT_SUSPEND:
291                 return ops->suspend_late;
292         case PM_EVENT_RESUME:
293                 return ops->resume_early;
294 #endif /* CONFIG_SUSPEND */
295 #ifdef CONFIG_HIBERNATE_CALLBACKS
296         case PM_EVENT_FREEZE:
297         case PM_EVENT_QUIESCE:
298                 return ops->freeze_late;
299         case PM_EVENT_HIBERNATE:
300                 return ops->poweroff_late;
301         case PM_EVENT_THAW:
302         case PM_EVENT_RECOVER:
303                 return ops->thaw_early;
304         case PM_EVENT_RESTORE:
305                 return ops->restore_early;
306 #endif /* CONFIG_HIBERNATE_CALLBACKS */
307         }
308
309         return NULL;
310 }
311
312 /**
313  * pm_noirq_op - Return the PM operation appropriate for given PM event.
314  * @ops: PM operations to choose from.
315  * @state: PM transition of the system being carried out.
316  *
317  * The driver of @dev will not receive interrupts while this function is being
318  * executed.
319  */
320 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
321 {
322         switch (state.event) {
323 #ifdef CONFIG_SUSPEND
324         case PM_EVENT_SUSPEND:
325                 return ops->suspend_noirq;
326         case PM_EVENT_RESUME:
327                 return ops->resume_noirq;
328 #endif /* CONFIG_SUSPEND */
329 #ifdef CONFIG_HIBERNATE_CALLBACKS
330         case PM_EVENT_FREEZE:
331         case PM_EVENT_QUIESCE:
332                 return ops->freeze_noirq;
333         case PM_EVENT_HIBERNATE:
334                 return ops->poweroff_noirq;
335         case PM_EVENT_THAW:
336         case PM_EVENT_RECOVER:
337                 return ops->thaw_noirq;
338         case PM_EVENT_RESTORE:
339                 return ops->restore_noirq;
340 #endif /* CONFIG_HIBERNATE_CALLBACKS */
341         }
342
343         return NULL;
344 }
345
346 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
347 {
348         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
349                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
350                 ", may wakeup" : "");
351 }
352
353 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
354                         int error)
355 {
356         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
357                 dev_name(dev), pm_verb(state.event), info, error);
358 }
359
360 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
361 {
362         ktime_t calltime;
363         u64 usecs64;
364         int usecs;
365
366         calltime = ktime_get();
367         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
368         do_div(usecs64, NSEC_PER_USEC);
369         usecs = usecs64;
370         if (usecs == 0)
371                 usecs = 1;
372         pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
373                 info ?: "", info ? " " : "", pm_verb(state.event),
374                 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
375 }
376
377 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
378                             pm_message_t state, char *info)
379 {
380         ktime_t calltime;
381         int error;
382
383         if (!cb)
384                 return 0;
385
386         calltime = initcall_debug_start(dev);
387
388         pm_dev_dbg(dev, state, info);
389         trace_device_pm_callback_start(dev, info, state.event);
390         error = cb(dev);
391         trace_device_pm_callback_end(dev, error);
392         suspend_report_result(cb, error);
393
394         initcall_debug_report(dev, calltime, error, state, info);
395
396         return error;
397 }
398
399 #ifdef CONFIG_DPM_WATCHDOG
400 struct dpm_watchdog {
401         struct device           *dev;
402         struct task_struct      *tsk;
403         struct timer_list       timer;
404 };
405
406 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
407         struct dpm_watchdog wd
408
409 /**
410  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
411  * @data: Watchdog object address.
412  *
413  * Called when a driver has timed out suspending or resuming.
414  * There's not much we can do here to recover so panic() to
415  * capture a crash-dump in pstore.
416  */
417 static void dpm_watchdog_handler(unsigned long data)
418 {
419         struct dpm_watchdog *wd = (void *)data;
420
421         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
422         show_stack(wd->tsk, NULL);
423         panic("%s %s: unrecoverable failure\n",
424                 dev_driver_string(wd->dev), dev_name(wd->dev));
425 }
426
427 /**
428  * dpm_watchdog_set - Enable pm watchdog for given device.
429  * @wd: Watchdog. Must be allocated on the stack.
430  * @dev: Device to handle.
431  */
432 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
433 {
434         struct timer_list *timer = &wd->timer;
435
436         wd->dev = dev;
437         wd->tsk = current;
438
439         init_timer_on_stack(timer);
440         /* use same timeout value for both suspend and resume */
441         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
442         timer->function = dpm_watchdog_handler;
443         timer->data = (unsigned long)wd;
444         add_timer(timer);
445 }
446
447 /**
448  * dpm_watchdog_clear - Disable suspend/resume watchdog.
449  * @wd: Watchdog to disable.
450  */
451 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
452 {
453         struct timer_list *timer = &wd->timer;
454
455         del_timer_sync(timer);
456         destroy_timer_on_stack(timer);
457 }
458 #else
459 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
460 #define dpm_watchdog_set(x, y)
461 #define dpm_watchdog_clear(x)
462 #endif
463
464 /*------------------------- Resume routines -------------------------*/
465
466 /**
467  * device_resume_noirq - Execute a "noirq resume" callback for given device.
468  * @dev: Device to handle.
469  * @state: PM transition of the system being carried out.
470  * @async: If true, the device is being resumed asynchronously.
471  *
472  * The driver of @dev will not receive interrupts while this function is being
473  * executed.
474  */
475 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
476 {
477         pm_callback_t callback = NULL;
478         char *info = NULL;
479         int error = 0;
480
481         TRACE_DEVICE(dev);
482         TRACE_RESUME(0);
483
484         if (dev->power.syscore || dev->power.direct_complete)
485                 goto Out;
486
487         if (!dev->power.is_noirq_suspended)
488                 goto Out;
489
490         dpm_wait(dev->parent, async);
491
492         if (dev->pm_domain) {
493                 info = "noirq power domain ";
494                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
495         } else if (dev->type && dev->type->pm) {
496                 info = "noirq type ";
497                 callback = pm_noirq_op(dev->type->pm, state);
498         } else if (dev->class && dev->class->pm) {
499                 info = "noirq class ";
500                 callback = pm_noirq_op(dev->class->pm, state);
501         } else if (dev->bus && dev->bus->pm) {
502                 info = "noirq bus ";
503                 callback = pm_noirq_op(dev->bus->pm, state);
504         }
505
506         if (!callback && dev->driver && dev->driver->pm) {
507                 info = "noirq driver ";
508                 callback = pm_noirq_op(dev->driver->pm, state);
509         }
510
511         error = dpm_run_callback(callback, dev, state, info);
512         dev->power.is_noirq_suspended = false;
513
514  Out:
515         complete_all(&dev->power.completion);
516         TRACE_RESUME(error);
517         return error;
518 }
519
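/*
 * A device is handled asynchronously only when it has opted in via
 * power.async_suspend, async PM is enabled globally, and PM tracing is off
 * (pm_trace relies on strictly ordered, synchronous callbacks).
 */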
520 static bool is_async(struct device *dev)
521 {
522         return dev->power.async_suspend && pm_async_enabled
523                 && !pm_trace_is_enabled();
524 }
525
526 static void async_resume_noirq(void *data, async_cookie_t cookie)
527 {
528         struct device *dev = (struct device *)data;
529         int error;
530
531         error = device_resume_noirq(dev, pm_transition, true);
532         if (error)
533                 pm_dev_err(dev, pm_transition, " async", error);
534
535         put_device(dev);
536 }
537
538 /**
539  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
540  * @state: PM transition of the system being carried out.
541  *
542  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
543  * enable device drivers to receive interrupts.
544  */
545 void dpm_resume_noirq(pm_message_t state)
546 {
547         struct device *dev;
548         ktime_t starttime = ktime_get();
549
550         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
551         mutex_lock(&dpm_list_mtx);
552         pm_transition = state;
553
554         /*
555          * Start the async threads upfront, in case their startup
556          * would otherwise be delayed by devices that resume
557          * synchronously.
558          */
559         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
560                 reinit_completion(&dev->power.completion);
561                 if (is_async(dev)) {
562                         get_device(dev);
563                         async_schedule(async_resume_noirq, dev);
564                 }
565         }
566
567         while (!list_empty(&dpm_noirq_list)) {
568                 dev = to_device(dpm_noirq_list.next);
569                 get_device(dev);
570                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
571                 mutex_unlock(&dpm_list_mtx);
572
573                 if (!is_async(dev)) {
574                         int error;
575
576                         error = device_resume_noirq(dev, state, false);
577                         if (error) {
578                                 suspend_stats.failed_resume_noirq++;
579                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
580                                 dpm_save_failed_dev(dev_name(dev));
581                                 pm_dev_err(dev, state, " noirq", error);
582                         }
583                 }
584
585                 mutex_lock(&dpm_list_mtx);
586                 put_device(dev);
587         }
588         mutex_unlock(&dpm_list_mtx);
589         async_synchronize_full();
590         dpm_show_time(starttime, state, "noirq");
591         resume_device_irqs();
592         device_wakeup_disarm_wake_irqs();
593         cpuidle_resume();
594         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
595 }
596
597 /**
598  * device_resume_early - Execute an "early resume" callback for given device.
599  * @dev: Device to handle.
600  * @state: PM transition of the system being carried out.
601  * @async: If true, the device is being resumed asynchronously.
602  *
603  * Runtime PM is disabled for @dev while this function is being executed.
604  */
605 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
606 {
607         pm_callback_t callback = NULL;
608         char *info = NULL;
609         int error = 0;
610
611         TRACE_DEVICE(dev);
612         TRACE_RESUME(0);
613
614         if (dev->power.syscore || dev->power.direct_complete)
615                 goto Out;
616
617         if (!dev->power.is_late_suspended)
618                 goto Out;
619
620         dpm_wait(dev->parent, async);
621
622         if (dev->pm_domain) {
623                 info = "early power domain ";
624                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
625         } else if (dev->type && dev->type->pm) {
626                 info = "early type ";
627                 callback = pm_late_early_op(dev->type->pm, state);
628         } else if (dev->class && dev->class->pm) {
629                 info = "early class ";
630                 callback = pm_late_early_op(dev->class->pm, state);
631         } else if (dev->bus && dev->bus->pm) {
632                 info = "early bus ";
633                 callback = pm_late_early_op(dev->bus->pm, state);
634         }
635
636         if (!callback && dev->driver && dev->driver->pm) {
637                 info = "early driver ";
638                 callback = pm_late_early_op(dev->driver->pm, state);
639         }
640
641         error = dpm_run_callback(callback, dev, state, info);
642         dev->power.is_late_suspended = false;
643
644  Out:
645         TRACE_RESUME(error);
646
647         pm_runtime_enable(dev);
648         complete_all(&dev->power.completion);
649         return error;
650 }
651
652 static void async_resume_early(void *data, async_cookie_t cookie)
653 {
654         struct device *dev = (struct device *)data;
655         int error;
656
657         error = device_resume_early(dev, pm_transition, true);
658         if (error)
659                 pm_dev_err(dev, pm_transition, " async", error);
660
661         put_device(dev);
662 }
663
664 /**
665  * dpm_resume_early - Execute "early resume" callbacks for all devices.
666  * @state: PM transition of the system being carried out.
667  */
668 void dpm_resume_early(pm_message_t state)
669 {
670         struct device *dev;
671         ktime_t starttime = ktime_get();
672
673         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
674         mutex_lock(&dpm_list_mtx);
675         pm_transition = state;
676
677         /*
678          * Start the async threads upfront, in case their startup
679          * would otherwise be delayed by devices that resume
680          * synchronously.
681          */
682         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
683                 reinit_completion(&dev->power.completion);
684                 if (is_async(dev)) {
685                         get_device(dev);
686                         async_schedule(async_resume_early, dev);
687                 }
688         }
689
690         while (!list_empty(&dpm_late_early_list)) {
691                 dev = to_device(dpm_late_early_list.next);
692                 get_device(dev);
693                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
694                 mutex_unlock(&dpm_list_mtx);
695
696                 if (!is_async(dev)) {
697                         int error;
698
699                         error = device_resume_early(dev, state, false);
700                         if (error) {
701                                 suspend_stats.failed_resume_early++;
702                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
703                                 dpm_save_failed_dev(dev_name(dev));
704                                 pm_dev_err(dev, state, " early", error);
705                         }
706                 }
707                 mutex_lock(&dpm_list_mtx);
708                 put_device(dev);
709         }
710         mutex_unlock(&dpm_list_mtx);
711         async_synchronize_full();
712         dpm_show_time(starttime, state, "early");
713         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
714 }
715
716 /**
717  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
718  * @state: PM transition of the system being carried out.
719  */
720 void dpm_resume_start(pm_message_t state)
721 {
722         dpm_resume_noirq(state);
723         dpm_resume_early(state);
724 }
725 EXPORT_SYMBOL_GPL(dpm_resume_start);
726
727 /**
728  * device_resume - Execute "resume" callbacks for given device.
729  * @dev: Device to handle.
730  * @state: PM transition of the system being carried out.
731  * @async: If true, the device is being resumed asynchronously.
732  */
733 static int device_resume(struct device *dev, pm_message_t state, bool async)
734 {
735         pm_callback_t callback = NULL;
736         char *info = NULL;
737         int error = 0;
738         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
739
740         TRACE_DEVICE(dev);
741         TRACE_RESUME(0);
742
743         if (dev->power.syscore)
744                 goto Complete;
745
746         if (dev->power.direct_complete) {
747                 /* Match the pm_runtime_disable() in __device_suspend(). */
748                 pm_runtime_enable(dev);
749                 goto Complete;
750         }
751
752         dpm_wait(dev->parent, async);
753         dpm_watchdog_set(&wd, dev);
754         device_lock(dev);
755
756         /*
757          * A small lie: clearing the flag lets new children be registered below
758          * a resumed device even though it has not completed yet.
759          */
760         dev->power.is_prepared = false;
761
762         if (!dev->power.is_suspended)
763                 goto Unlock;
764
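        /* Callback lookup order: PM domain, device type, class, bus, then driver. */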
765         if (dev->pm_domain) {
766                 info = "power domain ";
767                 callback = pm_op(&dev->pm_domain->ops, state);
768                 goto Driver;
769         }
770
771         if (dev->type && dev->type->pm) {
772                 info = "type ";
773                 callback = pm_op(dev->type->pm, state);
774                 goto Driver;
775         }
776
777         if (dev->class) {
778                 if (dev->class->pm) {
779                         info = "class ";
780                         callback = pm_op(dev->class->pm, state);
781                         goto Driver;
782                 } else if (dev->class->resume) {
783                         info = "legacy class ";
784                         callback = dev->class->resume;
785                         goto End;
786                 }
787         }
788
789         if (dev->bus) {
790                 if (dev->bus->pm) {
791                         info = "bus ";
792                         callback = pm_op(dev->bus->pm, state);
793                 } else if (dev->bus->resume) {
794                         info = "legacy bus ";
795                         callback = dev->bus->resume;
796                         goto End;
797                 }
798         }
799
800  Driver:
801         if (!callback && dev->driver && dev->driver->pm) {
802                 info = "driver ";
803                 callback = pm_op(dev->driver->pm, state);
804         }
805
806  End:
807         error = dpm_run_callback(callback, dev, state, info);
808         dev->power.is_suspended = false;
809
810  Unlock:
811         device_unlock(dev);
812         dpm_watchdog_clear(&wd);
813
814  Complete:
815         complete_all(&dev->power.completion);
816
817         TRACE_RESUME(error);
818
819         return error;
820 }
821
822 static void async_resume(void *data, async_cookie_t cookie)
823 {
824         struct device *dev = (struct device *)data;
825         int error;
826
827         error = device_resume(dev, pm_transition, true);
828         if (error)
829                 pm_dev_err(dev, pm_transition, " async", error);
830         put_device(dev);
831 }
832
833 /**
834  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
835  * @state: PM transition of the system being carried out.
836  *
837  * Execute the appropriate "resume" callback for all devices whose status
838  * indicates that they are suspended.
839  */
840 void dpm_resume(pm_message_t state)
841 {
842         struct device *dev;
843         ktime_t starttime = ktime_get();
844
845         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
846         might_sleep();
847
848         mutex_lock(&dpm_list_mtx);
849         pm_transition = state;
850         async_error = 0;
851
852         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
853                 reinit_completion(&dev->power.completion);
854                 if (is_async(dev)) {
855                         get_device(dev);
856                         async_schedule(async_resume, dev);
857                 }
858         }
859
860         while (!list_empty(&dpm_suspended_list)) {
861                 dev = to_device(dpm_suspended_list.next);
862                 get_device(dev);
863                 if (!is_async(dev)) {
864                         int error;
865
866                         mutex_unlock(&dpm_list_mtx);
867
868                         error = device_resume(dev, state, false);
869                         if (error) {
870                                 suspend_stats.failed_resume++;
871                                 dpm_save_failed_step(SUSPEND_RESUME);
872                                 dpm_save_failed_dev(dev_name(dev));
873                                 pm_dev_err(dev, state, "", error);
874                         }
875
876                         mutex_lock(&dpm_list_mtx);
877                 }
878                 if (!list_empty(&dev->power.entry))
879                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
880                 put_device(dev);
881         }
882         mutex_unlock(&dpm_list_mtx);
883         async_synchronize_full();
884         dpm_show_time(starttime, state, NULL);
885
886         cpufreq_resume();
887         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
888 }
889
890 /**
891  * device_complete - Complete a PM transition for given device.
892  * @dev: Device to handle.
893  * @state: PM transition of the system being carried out.
894  */
895 static void device_complete(struct device *dev, pm_message_t state)
896 {
897         void (*callback)(struct device *) = NULL;
898         char *info = NULL;
899
900         if (dev->power.syscore)
901                 return;
902
903         device_lock(dev);
904
905         if (dev->pm_domain) {
906                 info = "completing power domain ";
907                 callback = dev->pm_domain->ops.complete;
908         } else if (dev->type && dev->type->pm) {
909                 info = "completing type ";
910                 callback = dev->type->pm->complete;
911         } else if (dev->class && dev->class->pm) {
912                 info = "completing class ";
913                 callback = dev->class->pm->complete;
914         } else if (dev->bus && dev->bus->pm) {
915                 info = "completing bus ";
916                 callback = dev->bus->pm->complete;
917         }
918
919         if (!callback && dev->driver && dev->driver->pm) {
920                 info = "completing driver ";
921                 callback = dev->driver->pm->complete;
922         }
923
924         if (callback) {
925                 pm_dev_dbg(dev, state, info);
926                 callback(dev);
927         }
928
929         device_unlock(dev);
930
931         pm_runtime_put(dev);
932 }
933
934 /**
935  * dpm_complete - Complete a PM transition for all non-sysdev devices.
936  * @state: PM transition of the system being carried out.
937  *
938  * Execute the ->complete() callbacks for all devices whose PM status is not
939  * DPM_ON (this allows new devices to be registered).
940  */
941 void dpm_complete(pm_message_t state)
942 {
943         struct list_head list;
944
945         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
946         might_sleep();
947
948         INIT_LIST_HEAD(&list);
949         mutex_lock(&dpm_list_mtx);
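        /*
         * Move each device onto a temporary list so dpm_list_mtx can be
         * dropped while its ->complete() callback runs; the list is spliced
         * back into dpm_list when everything is done.
         */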
950         while (!list_empty(&dpm_prepared_list)) {
951                 struct device *dev = to_device(dpm_prepared_list.prev);
952
953                 get_device(dev);
954                 dev->power.is_prepared = false;
955                 list_move(&dev->power.entry, &list);
956                 mutex_unlock(&dpm_list_mtx);
957
958                 trace_device_pm_callback_start(dev, "", state.event);
959                 device_complete(dev, state);
960                 trace_device_pm_callback_end(dev, 0);
961
962                 mutex_lock(&dpm_list_mtx);
963                 put_device(dev);
964         }
965         list_splice(&list, &dpm_list);
966         mutex_unlock(&dpm_list_mtx);
967         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
968 }
969
970 /**
971  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
972  * @state: PM transition of the system being carried out.
973  *
974  * Execute "resume" callbacks for all devices and complete the PM transition of
975  * the system.
976  */
977 void dpm_resume_end(pm_message_t state)
978 {
979         dpm_resume(state);
980         dpm_complete(state);
981 }
982 EXPORT_SYMBOL_GPL(dpm_resume_end);
983
984
985 /*------------------------- Suspend routines -------------------------*/
986
987 /**
988  * resume_event - Return a "resume" message for given "suspend" sleep state.
989  * @sleep_state: PM message representing a sleep state.
990  *
991  * Return a PM message representing the resume event corresponding to given
992  * sleep state.
993  */
994 static pm_message_t resume_event(pm_message_t sleep_state)
995 {
996         switch (sleep_state.event) {
997         case PM_EVENT_SUSPEND:
998                 return PMSG_RESUME;
999         case PM_EVENT_FREEZE:
1000         case PM_EVENT_QUIESCE:
1001                 return PMSG_RECOVER;
1002         case PM_EVENT_HIBERNATE:
1003                 return PMSG_RESTORE;
1004         }
1005         return PMSG_ON;
1006 }
1007
1008 /**
1009  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1010  * @dev: Device to handle.
1011  * @state: PM transition of the system being carried out.
1012  * @async: If true, the device is being suspended asynchronously.
1013  *
1014  * The driver of @dev will not receive interrupts while this function is being
1015  * executed.
1016  */
1017 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1018 {
1019         pm_callback_t callback = NULL;
1020         char *info = NULL;
1021         int error = 0;
1022
1023         TRACE_DEVICE(dev);
1024         TRACE_SUSPEND(0);
1025
1026         if (async_error)
1027                 goto Complete;
1028
1029         if (pm_wakeup_pending()) {
1030                 async_error = -EBUSY;
1031                 goto Complete;
1032         }
1033
1034         if (dev->power.syscore || dev->power.direct_complete)
1035                 goto Complete;
1036
1037         dpm_wait_for_children(dev, async);
1038
1039         if (dev->pm_domain) {
1040                 info = "noirq power domain ";
1041                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1042         } else if (dev->type && dev->type->pm) {
1043                 info = "noirq type ";
1044                 callback = pm_noirq_op(dev->type->pm, state);
1045         } else if (dev->class && dev->class->pm) {
1046                 info = "noirq class ";
1047                 callback = pm_noirq_op(dev->class->pm, state);
1048         } else if (dev->bus && dev->bus->pm) {
1049                 info = "noirq bus ";
1050                 callback = pm_noirq_op(dev->bus->pm, state);
1051         }
1052
1053         if (!callback && dev->driver && dev->driver->pm) {
1054                 info = "noirq driver ";
1055                 callback = pm_noirq_op(dev->driver->pm, state);
1056         }
1057
1058         error = dpm_run_callback(callback, dev, state, info);
1059         if (!error)
1060                 dev->power.is_noirq_suspended = true;
1061         else
1062                 async_error = error;
1063
1064 Complete:
1065         complete_all(&dev->power.completion);
1066         TRACE_SUSPEND(error);
1067         return error;
1068 }
1069
1070 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1071 {
1072         struct device *dev = (struct device *)data;
1073         int error;
1074
1075         error = __device_suspend_noirq(dev, pm_transition, true);
1076         if (error) {
1077                 dpm_save_failed_dev(dev_name(dev));
1078                 pm_dev_err(dev, pm_transition, " async", error);
1079         }
1080
1081         put_device(dev);
1082 }
1083
1084 static int device_suspend_noirq(struct device *dev)
1085 {
1086         reinit_completion(&dev->power.completion);
1087
1088         if (is_async(dev)) {
1089                 get_device(dev);
1090                 async_schedule(async_suspend_noirq, dev);
1091                 return 0;
1092         }
1093         return __device_suspend_noirq(dev, pm_transition, false);
1094 }
1095
1096 /**
1097  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1098  * @state: PM transition of the system being carried out.
1099  *
1100  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1101  * handlers for all non-sysdev devices.
1102  */
1103 int dpm_suspend_noirq(pm_message_t state)
1104 {
1105         ktime_t starttime = ktime_get();
1106         int error = 0;
1107
1108         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
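        /* Quiesce cpuidle, arm wakeup IRQs and disable device interrupts before running the "noirq" callbacks. */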
1109         cpuidle_pause();
1110         device_wakeup_arm_wake_irqs();
1111         suspend_device_irqs();
1112         mutex_lock(&dpm_list_mtx);
1113         pm_transition = state;
1114         async_error = 0;
1115
1116         while (!list_empty(&dpm_late_early_list)) {
1117                 struct device *dev = to_device(dpm_late_early_list.prev);
1118
1119                 get_device(dev);
1120                 mutex_unlock(&dpm_list_mtx);
1121
1122                 error = device_suspend_noirq(dev);
1123
1124                 mutex_lock(&dpm_list_mtx);
1125                 if (error) {
1126                         pm_dev_err(dev, state, " noirq", error);
1127                         dpm_save_failed_dev(dev_name(dev));
1128                         put_device(dev);
1129                         break;
1130                 }
1131                 if (!list_empty(&dev->power.entry))
1132                         list_move(&dev->power.entry, &dpm_noirq_list);
1133                 put_device(dev);
1134
1135                 if (async_error)
1136                         break;
1137         }
1138         mutex_unlock(&dpm_list_mtx);
1139         async_synchronize_full();
1140         if (!error)
1141                 error = async_error;
1142
1143         if (error) {
1144                 suspend_stats.failed_suspend_noirq++;
1145                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1146                 dpm_resume_noirq(resume_event(state));
1147         } else {
1148                 dpm_show_time(starttime, state, "noirq");
1149         }
1150         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1151         return error;
1152 }
1153
1154 /**
1155  * __device_suspend_late - Execute a "late suspend" callback for given device.
1156  * @dev: Device to handle.
1157  * @state: PM transition of the system being carried out.
1158  * @async: If true, the device is being suspended asynchronously.
1159  *
1160  * Runtime PM is disabled for @dev while this function is being executed.
1161  */
1162 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1163 {
1164         pm_callback_t callback = NULL;
1165         char *info = NULL;
1166         int error = 0;
1167
1168         TRACE_DEVICE(dev);
1169         TRACE_SUSPEND(0);
1170
1171         __pm_runtime_disable(dev, false);
1172
1173         if (async_error)
1174                 goto Complete;
1175
1176         if (pm_wakeup_pending()) {
1177                 async_error = -EBUSY;
1178                 goto Complete;
1179         }
1180
1181         if (dev->power.syscore || dev->power.direct_complete)
1182                 goto Complete;
1183
1184         dpm_wait_for_children(dev, async);
1185
1186         if (dev->pm_domain) {
1187                 info = "late power domain ";
1188                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1189         } else if (dev->type && dev->type->pm) {
1190                 info = "late type ";
1191                 callback = pm_late_early_op(dev->type->pm, state);
1192         } else if (dev->class && dev->class->pm) {
1193                 info = "late class ";
1194                 callback = pm_late_early_op(dev->class->pm, state);
1195         } else if (dev->bus && dev->bus->pm) {
1196                 info = "late bus ";
1197                 callback = pm_late_early_op(dev->bus->pm, state);
1198         }
1199
1200         if (!callback && dev->driver && dev->driver->pm) {
1201                 info = "late driver ";
1202                 callback = pm_late_early_op(dev->driver->pm, state);
1203         }
1204
1205         error = dpm_run_callback(callback, dev, state, info);
1206         if (!error)
1207                 dev->power.is_late_suspended = true;
1208         else
1209                 async_error = error;
1210
1211 Complete:
1212         TRACE_SUSPEND(error);
1213         complete_all(&dev->power.completion);
1214         return error;
1215 }
1216
1217 static void async_suspend_late(void *data, async_cookie_t cookie)
1218 {
1219         struct device *dev = (struct device *)data;
1220         int error;
1221
1222         error = __device_suspend_late(dev, pm_transition, true);
1223         if (error) {
1224                 dpm_save_failed_dev(dev_name(dev));
1225                 pm_dev_err(dev, pm_transition, " async", error);
1226         }
1227         put_device(dev);
1228 }
1229
1230 static int device_suspend_late(struct device *dev)
1231 {
1232         reinit_completion(&dev->power.completion);
1233
1234         if (is_async(dev)) {
1235                 get_device(dev);
1236                 async_schedule(async_suspend_late, dev);
1237                 return 0;
1238         }
1239
1240         return __device_suspend_late(dev, pm_transition, false);
1241 }
1242
1243 /**
1244  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1245  * @state: PM transition of the system being carried out.
1246  */
1247 int dpm_suspend_late(pm_message_t state)
1248 {
1249         ktime_t starttime = ktime_get();
1250         int error = 0;
1251
1252         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1253         mutex_lock(&dpm_list_mtx);
1254         pm_transition = state;
1255         async_error = 0;
1256
1257         while (!list_empty(&dpm_suspended_list)) {
1258                 struct device *dev = to_device(dpm_suspended_list.prev);
1259
1260                 get_device(dev);
1261                 mutex_unlock(&dpm_list_mtx);
1262
1263                 error = device_suspend_late(dev);
1264
1265                 mutex_lock(&dpm_list_mtx);
1266                 if (error) {
1267                         pm_dev_err(dev, state, " late", error);
1268                         dpm_save_failed_dev(dev_name(dev));
1269                         put_device(dev);
1270                         break;
1271                 }
1272                 if (!list_empty(&dev->power.entry))
1273                         list_move(&dev->power.entry, &dpm_late_early_list);
1274                 put_device(dev);
1275
1276                 if (async_error)
1277                         break;
1278         }
1279         mutex_unlock(&dpm_list_mtx);
1280         async_synchronize_full();
1281         if (!error)
1282                 error = async_error;
1283         if (error) {
1284                 suspend_stats.failed_suspend_late++;
1285                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1286                 dpm_resume_early(resume_event(state));
1287         } else {
1288                 dpm_show_time(starttime, state, "late");
1289         }
1290         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1291         return error;
1292 }
1293
1294 /**
1295  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1296  * @state: PM transition of the system being carried out.
1297  */
1298 int dpm_suspend_end(pm_message_t state)
1299 {
1300         int error = dpm_suspend_late(state);
1301         if (error)
1302                 return error;
1303
1304         error = dpm_suspend_noirq(state);
1305         if (error) {
1306                 dpm_resume_early(resume_event(state));
1307                 return error;
1308         }
1309
1310         return 0;
1311 }
1312 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1313
1314 /**
1315  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1316  * @dev: Device to suspend.
1317  * @state: PM transition of the system being carried out.
1318  * @cb: Suspend callback to execute.
1319  * @info: string description of caller.
1320  */
1321 static int legacy_suspend(struct device *dev, pm_message_t state,
1322                           int (*cb)(struct device *dev, pm_message_t state),
1323                           char *info)
1324 {
1325         int error;
1326         ktime_t calltime;
1327
1328         calltime = initcall_debug_start(dev);
1329
1330         trace_device_pm_callback_start(dev, info, state.event);
1331         error = cb(dev, state);
1332         trace_device_pm_callback_end(dev, error);
1333         suspend_report_result(cb, error);
1334
1335         initcall_debug_report(dev, calltime, error, state, info);
1336
1337         return error;
1338 }
1339
1340 /**
1341  * __device_suspend - Execute "suspend" callbacks for given device.
1342  * @dev: Device to handle.
1343  * @state: PM transition of the system being carried out.
1344  * @async: If true, the device is being suspended asynchronously.
1345  */
1346 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1347 {
1348         pm_callback_t callback = NULL;
1349         char *info = NULL;
1350         int error = 0;
1351         char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1352         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1353
1354         TRACE_DEVICE(dev);
1355         TRACE_SUSPEND(0);
1356
1357         dpm_wait_for_children(dev, async);
1358
1359         if (async_error)
1360                 goto Complete;
1361
1362         /*
1363          * If a device configured to wake up the system from sleep states
1364          * has been suspended at run time and there's a resume request pending
1365          * for it, this is equivalent to the device signaling wakeup, so the
1366          * system suspend operation should be aborted.
1367          */
1368         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1369                 pm_wakeup_event(dev, 0);
1370
1371         if (pm_wakeup_pending()) {
1372                 pm_get_active_wakeup_sources(suspend_abort,
1373                         MAX_SUSPEND_ABORT_LEN);
1374                 log_suspend_abort_reason(suspend_abort);
1375                 async_error = -EBUSY;
1376                 goto Complete;
1377         }
1378
1379         if (dev->power.syscore)
1380                 goto Complete;
1381
1382         if (dev->power.direct_complete) {
1383                 if (pm_runtime_status_suspended(dev)) {
1384                         pm_runtime_disable(dev);
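                        /*
                         * Re-check after disabling runtime PM: if the device
                         * is still runtime-suspended, its status can no
                         * longer change, so its suspend callbacks can be
                         * skipped safely.
                         */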
1385                         if (pm_runtime_status_suspended(dev))
1386                                 goto Complete;
1387
1388                         pm_runtime_enable(dev);
1389                 }
1390                 dev->power.direct_complete = false;
1391         }
1392
1393         dpm_watchdog_set(&wd, dev);
1394         device_lock(dev);
1395
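        /*
         * Callback lookup order mirrors resume: PM domain, device type, class,
         * bus, then driver; legacy class/bus suspend methods are used only
         * when no dev_pm_ops is provided.
         */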
1396         if (dev->pm_domain) {
1397                 info = "power domain ";
1398                 callback = pm_op(&dev->pm_domain->ops, state);
1399                 goto Run;
1400         }
1401
1402         if (dev->type && dev->type->pm) {
1403                 info = "type ";
1404                 callback = pm_op(dev->type->pm, state);
1405                 goto Run;
1406         }
1407
1408         if (dev->class) {
1409                 if (dev->class->pm) {
1410                         info = "class ";
1411                         callback = pm_op(dev->class->pm, state);
1412                         goto Run;
1413                 } else if (dev->class->suspend) {
1414                         pm_dev_dbg(dev, state, "legacy class ");
1415                         error = legacy_suspend(dev, state, dev->class->suspend,
1416                                                 "legacy class ");
1417                         goto End;
1418                 }
1419         }
1420
1421         if (dev->bus) {
1422                 if (dev->bus->pm) {
1423                         info = "bus ";
1424                         callback = pm_op(dev->bus->pm, state);
1425                 } else if (dev->bus->suspend) {
1426                         pm_dev_dbg(dev, state, "legacy bus ");
1427                         error = legacy_suspend(dev, state, dev->bus->suspend,
1428                                                 "legacy bus ");
1429                         goto End;
1430                 }
1431         }
1432
1433  Run:
1434         if (!callback && dev->driver && dev->driver->pm) {
1435                 info = "driver ";
1436                 callback = pm_op(dev->driver->pm, state);
1437         }
1438
1439         error = dpm_run_callback(callback, dev, state, info);
1440
1441  End:
1442         if (!error) {
1443                 struct device *parent = dev->parent;
1444
1445                 dev->power.is_suspended = true;
1446                 if (parent) {
1447                         spin_lock_irq(&parent->power.lock);
1448
1449                         dev->parent->power.direct_complete = false;
1450                         if (dev->power.wakeup_path
1451                             && !dev->parent->power.ignore_children)
1452                                 dev->parent->power.wakeup_path = true;
1453
1454                         spin_unlock_irq(&parent->power.lock);
1455                 }
1456         }
1457
1458         device_unlock(dev);
1459         dpm_watchdog_clear(&wd);
1460
1461  Complete:
1462         complete_all(&dev->power.completion);
1463         if (error)
1464                 async_error = error;
1465
1466         TRACE_SUSPEND(error);
1467         return error;
1468 }
1469
1470 static void async_suspend(void *data, async_cookie_t cookie)
1471 {
1472         struct device *dev = (struct device *)data;
1473         int error;
1474
1475         error = __device_suspend(dev, pm_transition, true);
1476         if (error) {
1477                 dpm_save_failed_dev(dev_name(dev));
1478                 pm_dev_err(dev, pm_transition, " async", error);
1479         }
1480
1481         put_device(dev);
1482 }
1483
1484 static int device_suspend(struct device *dev)
1485 {
1486         reinit_completion(&dev->power.completion);
1487
1488         if (is_async(dev)) {
1489                 get_device(dev);
1490                 async_schedule(async_suspend, dev);
1491                 return 0;
1492         }
1493
1494         return __device_suspend(dev, pm_transition, false);
1495 }
1496
1497 /**
1498  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1499  * @state: PM transition of the system being carried out.
1500  */
1501 int dpm_suspend(pm_message_t state)
1502 {
1503         ktime_t starttime = ktime_get();
1504         int error = 0;
1505
1506         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1507         might_sleep();
1508
1509         cpufreq_suspend();
1510
1511         mutex_lock(&dpm_list_mtx);
1512         pm_transition = state;
1513         async_error = 0;
1514         while (!list_empty(&dpm_prepared_list)) {
1515                 struct device *dev = to_device(dpm_prepared_list.prev);
1516
1517                 get_device(dev);
1518                 mutex_unlock(&dpm_list_mtx);
1519
1520                 error = device_suspend(dev);
1521
1522                 mutex_lock(&dpm_list_mtx);
1523                 if (error) {
1524                         pm_dev_err(dev, state, "", error);
1525                         dpm_save_failed_dev(dev_name(dev));
1526                         put_device(dev);
1527                         break;
1528                 }
1529                 if (!list_empty(&dev->power.entry))
1530                         list_move(&dev->power.entry, &dpm_suspended_list);
1531                 put_device(dev);
1532                 if (async_error)
1533                         break;
1534         }
1535         mutex_unlock(&dpm_list_mtx);
1536         async_synchronize_full();
1537         if (!error)
1538                 error = async_error;
1539         if (error) {
1540                 suspend_stats.failed_suspend++;
1541                 dpm_save_failed_step(SUSPEND_SUSPEND);
1542         } else
1543                 dpm_show_time(starttime, state, NULL);
1544         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1545         return error;
1546 }
1547
1548 /**
1549  * device_prepare - Prepare a device for system power transition.
1550  * @dev: Device to handle.
1551  * @state: PM transition of the system being carried out.
1552  *
1553  * Execute the ->prepare() callback(s) for given device.  No new children of the
1554  * device may be registered after this function has returned.
1555  */
1556 static int device_prepare(struct device *dev, pm_message_t state)
1557 {
1558         int (*callback)(struct device *) = NULL;
1559         char *info = NULL;
1560         int ret = 0;
1561
1562         if (dev->power.syscore)
1563                 return 0;
1564
1565         /*
1566          * If a device's parent goes into runtime suspend at the wrong time,
1567          * it won't be possible to resume the device.  To prevent this we
1568          * block runtime suspend here, during the prepare phase, and allow
1569          * it again during the complete phase.
1570          */
1571         pm_runtime_get_noresume(dev);
1572
1573         device_lock(dev);
1574
1575         dev->power.wakeup_path = device_may_wakeup(dev);
1576
1577         if (dev->pm_domain) {
1578                 info = "preparing power domain ";
1579                 callback = dev->pm_domain->ops.prepare;
1580         } else if (dev->type && dev->type->pm) {
1581                 info = "preparing type ";
1582                 callback = dev->type->pm->prepare;
1583         } else if (dev->class && dev->class->pm) {
1584                 info = "preparing class ";
1585                 callback = dev->class->pm->prepare;
1586         } else if (dev->bus && dev->bus->pm) {
1587                 info = "preparing bus ";
1588                 callback = dev->bus->pm->prepare;
1589         }
1590
1591         if (!callback && dev->driver && dev->driver->pm) {
1592                 info = "preparing driver ";
1593                 callback = dev->driver->pm->prepare;
1594         }
1595
1596         if (callback)
1597                 ret = callback(dev);
1598
1599         device_unlock(dev);
1600
1601         if (ret < 0) {
1602                 suspend_report_result(callback, ret);
1603                 pm_runtime_put(dev);
1604                 return ret;
1605         }
1606         /*
1607          * A positive return value from ->prepare() means "this device appears
1608          * to be runtime-suspended and its state is fine, so if it really is
1609          * runtime-suspended, you can leave it in that state provided that you
1610          * will do the same thing with all of its descendants".  This only
1611          * applies to suspend transitions, however.
1612          */
1613         spin_lock_irq(&dev->power.lock);
1614         dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1615         spin_unlock_irq(&dev->power.lock);
1616         return 0;
1617 }
1618
1619 /**
1620  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1621  * @state: PM transition of the system being carried out.
1622  *
1623  * Execute the ->prepare() callback(s) for all devices.
1624  */
1625 int dpm_prepare(pm_message_t state)
1626 {
1627         int error = 0;
1628
1629         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1630         might_sleep();
1631
1632         mutex_lock(&dpm_list_mtx);
1633         while (!list_empty(&dpm_list)) {
1634                 struct device *dev = to_device(dpm_list.next);
1635
1636                 get_device(dev);
1637                 mutex_unlock(&dpm_list_mtx);
1638
1639                 trace_device_pm_callback_start(dev, "", state.event);
1640                 error = device_prepare(dev, state);
1641                 trace_device_pm_callback_end(dev, error);
1642
1643                 mutex_lock(&dpm_list_mtx);
1644                 if (error) {
1645                         if (error == -EAGAIN) {
1646                                 put_device(dev);
1647                                 error = 0;
1648                                 continue;
1649                         }
1650                         printk(KERN_INFO "PM: Device %s not prepared "
1651                                 "for power transition: code %d\n",
1652                                 dev_name(dev), error);
1653                         put_device(dev);
1654                         break;
1655                 }
1656                 dev->power.is_prepared = true;
1657                 if (!list_empty(&dev->power.entry))
1658                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1659                 put_device(dev);
1660         }
1661         mutex_unlock(&dpm_list_mtx);
1662         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1663         return error;
1664 }
1665
1666 /**
1667  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1668  * @state: PM transition of the system being carried out.
1669  *
1670  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1671  * callbacks for them.
1672  */
1673 int dpm_suspend_start(pm_message_t state)
1674 {
1675         int error;
1676
1677         error = dpm_prepare(state);
1678         if (error) {
1679                 suspend_stats.failed_prepare++;
1680                 dpm_save_failed_step(SUSPEND_PREPARE);
1681         } else
1682                 error = dpm_suspend(state);
1683         return error;
1684 }
1685 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1686
1687 void __suspend_report_result(const char *function, void *fn, int ret)
1688 {
1689         if (ret)
1690                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1691 }
1692 EXPORT_SYMBOL_GPL(__suspend_report_result);
1693
1694 /**
1695  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1696  * @dev: Device to wait for.
1697  * @subordinate: Device that needs to wait for @dev.
1698  */
1699 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1700 {
1701         dpm_wait(dev, subordinate->power.async_suspend);
1702         return async_error;
1703 }
1704 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1705
1706 /**
1707  * dpm_for_each_dev - device iterator.
1708  * @data: data for the callback.
1709  * @fn: function to be called for each device.
1710  *
1711  * Iterate over devices in dpm_list, and call @fn for each device,
1712  * passing it @data.
1713  */
1714 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1715 {
1716         struct device *dev;
1717
1718         if (!fn)
1719                 return;
1720
1721         device_pm_lock();
1722         list_for_each_entry(dev, &dpm_list, power.entry)
1723                 fn(dev, data);
1724         device_pm_unlock();
1725 }
1726 EXPORT_SYMBOL_GPL(dpm_for_each_dev);