/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

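/*
 * Illustrative sketch (not part of the original file): the rule above rules
 * out lock nesting like the following, which could deadlock against
 * device_pm_add() running with a device lock already held:
 *
 *	mutex_lock(&dpm_list_mtx);
 *	device_lock(dev);	<-- WRONG: device lock taken under dpm_list_mtx
 *	...
 *	device_unlock(dev);
 *	mutex_unlock(&dpm_list_mtx);
 */
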
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}
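
/*
 * Note: pm_print_times_enabled is toggled from user space via
 * /sys/power/pm_print_times.  The ">> 10" above cheaply approximates the
 * nanoseconds-to-microseconds division (by 1024 rather than 1000), so the
 * printed "usecs" figure is roughly 2% low.
 */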

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
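
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * a device opts in to the asynchronous suspend/resume path at probe time
 * with device_enable_async_suspend(), which is what makes dpm_wait() above
 * actually block on its completion:
 */
#if 0	/* example only */
static int foo_probe(struct device *dev)
{
	/*
	 * Suspend/resume this device in an async thread, ordered only
	 * against its parents and children.
	 */
	device_enable_async_suspend(dev);
	return 0;
}
#endif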

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
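
/*
 * Illustrative sketch (hypothetical "foo" driver): a dev_pm_ops filled in
 * with the standard helper macros is where pm_op(), pm_late_early_op() and
 * pm_noirq_op() above look up the callback for each phase:
 */
#if 0	/* example only */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }
static int foo_suspend_late(struct device *dev) { return 0; }
static int foo_resume_early(struct device *dev) { return 0; }
static int foo_suspend_noirq(struct device *dev) { return 0; }
static int foo_resume_noirq(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};
#endif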

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
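
/*
 * Illustrative usage sketch (fragment, not compiled): the watchdog brackets
 * a potentially stuck callback, mirroring how device_resume() and
 * __device_suspend() below use it:
 */
#if 0	/* example only */
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_watchdog_set(&wd, dev);
	error = dpm_run_callback(callback, dev, state, info);
	dpm_watchdog_clear(&wd);
#endif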

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
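
/*
 * Example: if the "noirq" phase fails during hibernation's freeze transition,
 * dpm_suspend_noirq() below rolls the already-suspended devices back with
 * dpm_resume_noirq(resume_event(state)), i.e. PMSG_FREEZE maps to
 * PMSG_RECOVER.
 */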

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
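
/*
 * Illustrative sketch (fragment, error handling simplified): a platform's
 * system-sleep core pairs the exported entry points in this order:
 */
#if 0	/* example only */
	error = dpm_suspend_start(PMSG_SUSPEND);	/* prepare + suspend */
	if (!error) {
		error = dpm_suspend_end(PMSG_SUSPEND);	/* late + noirq */
		if (!error) {
			/* the platform enters the sleep state here */
			dpm_resume_start(PMSG_RESUME);	/* noirq + early */
		}
		dpm_resume_end(PMSG_RESUME);		/* resume + complete */
	}
#endif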

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
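
/*
 * Illustrative sketch (hypothetical): the legacy bus/class suspend callbacks
 * run through legacy_suspend() take the pm_message_t directly:
 */
#if 0	/* example only */
static int foo_legacy_suspend(struct device *dev, pm_message_t state)
{
	dev_dbg(dev, "legacy suspend, event %d\n", state.event);
	return 0;
}
#endif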

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_get_active_wakeup_sources(suspend_abort,
			MAX_SUSPEND_ABORT_LEN);
		log_suspend_abort_reason(suspend_abort);
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
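
/*
 * Illustrative sketch (hypothetical "foo" driver): a ->prepare() callback
 * that returns a positive value to request the direct_complete optimization
 * described above:
 */
#if 0	/* example only */
static int foo_prepare(struct device *dev)
{
	/*
	 * If the device is already runtime-suspended and may stay that way
	 * across the transition, let the PM core skip its callbacks.
	 */
	return pm_runtime_status_suspended(dev) ? 1 : 0;
}
#endif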

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
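
/*
 * Illustrative sketch (hypothetical "foo" driver that depends on another
 * device): its callback must not proceed until the other device's possibly
 * asynchronous PM operation has finished.  struct foo and its "other" member
 * are assumptions for the example:
 */
#if 0	/* example only */
struct foo {
	struct device *other;	/* hypothetical device we depend on */
};

static int foo_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);
	int error;

	error = device_pm_wait_for_dev(dev, foo->other);
	if (error)
		return error;
	return 0;
}
#endif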

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
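
/*
 * Illustrative sketch (hypothetical callback): counting and logging every
 * device on dpm_list with dpm_for_each_dev():
 */
#if 0	/* example only */
static void foo_show_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	dev_info(dev, "on dpm_list (#%u)\n", *count);
}

static void foo_dump_dpm_list(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, foo_show_dev);
}
#endif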

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}