/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

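/* Signature shared by all phase callbacks in struct dev_pm_ops. */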
typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
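/*
 * During a system transition, devices migrate from dpm_list through these
 * per-phase lists: prepared -> suspended -> late/early -> noirq, and back
 * again in reverse on resume.
 */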
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

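/* First error reported by any suspend/resume thread; reset at each phase. */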
static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
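                /* >> 10 divides by 1024: a cheap approximation of ns -> us. */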
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

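/* Wait for any in-flight async PM operations on all children of @dev. */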
static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the callback returned
 * by this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
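        /* Report at least 1 us so a zero duration never shows up in the log. */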
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
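/**
 * struct dpm_watchdog - Data needed by the suspend/resume watchdog.
 * @dev: Device being watched.
 * @tsk: Task executing the device's PM callback.
 * @timer: Timer that fires if the callback takes too long.
 */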
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        dpm_wait(dev->parent, async);

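        /* Callback lookup order: PM domain, then device type, class, bus, driver. */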
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_noirq_suspended = false;

 Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}

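/* Whether @dev may be handled asynchronously; forced off while pm_trace is in use. */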
static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Advance the async threads upfront, in case their start would
         * otherwise be delayed by non-async resuming devices.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_noirq, dev);
                }
        }

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();
        cpuidle_resume();
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        dpm_wait(dev->parent, async);

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_late_suspended = false;

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Advance the async threads upfront, in case their start would
         * otherwise be delayed by non-async resuming devices.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_early, dev);
                }
        }

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

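        /* Kick off async resumes upfront so synchronous devices don't delay them. */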
        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

        cpufreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        dpm_wait_for_children(dev, async);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_noirq_suspended = true;
        else
                async_error = error;

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

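/* Queue an async "noirq" suspend for @dev if allowed, else run it synchronously. */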
static int device_suspend_noirq(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_noirq, dev);
                return 0;
        }
        return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        cpuidle_pause();
        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                dpm_resume_noirq(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "noirq");
        }
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

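        /* Runtime PM stays disabled until device_resume_early() re-enables it. */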
        __pm_runtime_disable(dev, false);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        dpm_wait_for_children(dev, async);

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_late_suspended = true;
        else
                async_error = error;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_late, dev);
                return 0;
        }

        return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "late");
        }
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
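                /*
                 * Recheck with runtime PM disabled: a concurrent runtime
                 * resume may have changed the status since the first check.
                 */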
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev))
                                goto Complete;

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                struct device *parent = dev->parent;

                dev->power.is_suspended = true;
                if (parent) {
                        spin_lock_irq(&parent->power.lock);

                        dev->parent->power.direct_complete = false;
                        if (dev->power.wakeup_path
                            && !dev->parent->power.ignore_children)
                                dev->parent->power.wakeup_path = true;

                        spin_unlock_irq(&parent->power.lock);
                }
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback)
                ret = callback(dev);

        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants".  This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);