/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices; sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
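
/*
 * Illustrative sketch (not compiled; documentation only): the rule above
 * defines a lock ordering of device lock first, dpm_list_mtx second.
 * "dev" below stands for any device on dpm_list.
 */
#if 0
        /* Wrong: inverted ordering, may deadlock with device_pm_add(): */
        mutex_lock(&dpm_list_mtx);
        device_lock(dev);

        /* Right: the device lock, if needed at all, is taken first: */
        device_lock(dev);
        mutex_lock(&dpm_list_mtx);
#endif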

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
        device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s, cb: %pf\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none", cb);
                calltime = ktime_get();
        }

        return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
                /* ">> 10" cheaply approximates division by 1000 (ns -> us). */
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
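
/*
 * Illustrative sketch (not compiled; documentation only): given a
 * hypothetical driver's dev_pm_ops like the one below, pm_op() returns
 * foo_suspend for PM_EVENT_SUSPEND and foo_resume for PM_EVENT_RESUME.
 * The foo_* names are made up for this example.
 */
#if 0
static const struct dev_pm_ops foo_pm_ops = {
        .suspend = foo_suspend, /* picked for PM_EVENT_SUSPEND */
        .resume  = foo_resume,  /* picked for PM_EVENT_RESUME */
        .freeze  = foo_freeze,  /* picked for PM_EVENT_FREEZE/QUIESCE */
};
#endif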

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev, cb);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        dpm_wait(dev->parent, async);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_noirq_suspended = false;

 Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
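
/*
 * Illustrative sketch (not compiled; documentation only): is_async() only
 * returns true for devices that opted in. A driver typically does so at
 * probe time via device_enable_async_suspend(), which sets
 * dev->power.async_suspend; "foo_probe" is a made-up name.
 */
#if 0
static int foo_probe(struct device *dev)
{
        device_enable_async_suspend(dev);       /* allow async suspend/resume */
        return 0;
}
#endif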

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Kick off the async threads upfront, so that their startup is
         * not delayed by devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_noirq, dev);
                }
        }

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();
        cpuidle_resume();
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        dpm_wait(dev->parent, async);

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_late_suspended = false;

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Kick off the async threads upfront, so that their startup is
         * not delayed by devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_early, dev);
                }
        }

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

        cpufreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_noirq_suspended = true;
        else
                async_error = error;

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_noirq, dev);
                return 0;
        }
        return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        cpuidle_pause();
        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                dpm_resume_noirq(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "noirq");
        }
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        __pm_runtime_disable(dev, false);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_late_suspended = true;
        else
                async_error = error;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_late, dev);
                return 0;
        }

        return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);

                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "late");
        }
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: String description of the caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev, cb);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}
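
/*
 * Illustrative sketch (not compiled; documentation only): legacy callbacks
 * take the pm_message_t themselves, unlike the single-argument
 * pm_callback_t used with dev_pm_ops; "foo_bus_suspend" is a made-up name.
 */
#if 0
static int foo_bus_suspend(struct device *dev, pm_message_t state)
{
        /* quiesce the device according to state.event */
        return 0;
}
#endif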

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_get_active_wakeup_sources(suspend_abort,
                        MAX_SUSPEND_ABORT_LEN);
                log_suspend_abort_reason(suspend_abort);
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev))
                                goto Complete;

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                struct device *parent = dev->parent;

                dev->power.is_suspended = true;
                if (parent) {
                        spin_lock_irq(&parent->power.lock);

                        dev->parent->power.direct_complete = false;
                        if (dev->power.wakeup_path
                            && !dev->parent->power.ignore_children)
                                dev->parent->power.wakeup_path = true;

                        spin_unlock_irq(&parent->power.lock);
                }
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->power.no_pm_callbacks) {
                ret = 1;        /* Let device go direct_complete */
                goto unlock;
        }

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants".  This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
        spin_unlock_irq(&dev->power.lock);
        return 0;
}
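
/*
 * Illustrative sketch (not compiled; documentation only): a driver that can
 * keep its device runtime-suspended across system suspend opts in to the
 * direct_complete path described above by returning a positive value from
 * its ->prepare() callback; "foo_prepare" is a made-up name.
 */
#if 0
static int foo_prepare(struct device *dev)
{
        /* Positive: if runtime-suspended, leave the device as it is. */
        return pm_runtime_status_suspended(dev) ? 1 : 0;
}
#endif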

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
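
/*
 * Illustrative sketch (not compiled; documentation only): a driver whose PM
 * ordering depends on another device can serialize against it with
 * device_pm_wait_for_dev(); "foo_suspend" and "bar_dev" are made-up names
 * for a driver callback and the device it depends on.
 */
#if 0
static int foo_suspend(struct device *dev)
{
        /* Do not proceed until bar_dev's PM operation has completed. */
        int error = device_pm_wait_for_dev(dev, bar_dev);

        if (error)
                return error;
        /* now suspend this device */
        return 0;
}
#endif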

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
        if (!ops)
                return true;

        return !ops->prepare &&
               !ops->suspend &&
               !ops->suspend_late &&
               !ops->suspend_noirq &&
               !ops->resume_noirq &&
               !ops->resume_early &&
               !ops->resume &&
               !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_pm_callbacks =
                (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
                (!dev->driver || pm_ops_is_empty(dev->driver->pm));
        spin_unlock_irq(&dev->power.lock);
}