drivers: power: use 'current' instead of 'get_current()'
[firefly-linux-kernel-4.4.55.git] drivers/base/power/main.c
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded dev_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36 #include <linux/wakeup_reason.h>
37
38 #include <asm/current.h>
39
40 #include "../base.h"
41 #include "power.h"
42
43 typedef int (*pm_callback_t)(struct device *);
44
45 /*
46  * The entries in dpm_list are in depth-first order, simply
47  * because children are guaranteed to be discovered after parents, and
48  * are inserted at the back of the list on discovery.
49  *
50  * Since device_pm_add() may be called with a device lock held,
51  * we must never try to acquire a device lock while holding
52  * dpm_list_mtx.
53  */
54
55 LIST_HEAD(dpm_list);
56 static LIST_HEAD(dpm_prepared_list);
57 static LIST_HEAD(dpm_suspended_list);
58 static LIST_HEAD(dpm_late_early_list);
59 static LIST_HEAD(dpm_noirq_list);
60
61 struct suspend_stats suspend_stats;
62 static DEFINE_MUTEX(dpm_list_mtx);
63 static pm_message_t pm_transition;
64
65 static void dpm_drv_timeout(unsigned long data);
66 struct dpm_drv_wd_data {
67         struct device *dev;
68         struct task_struct *tsk;
69 };
70
71 static int async_error;
72
73 static char *pm_verb(int event)
74 {
75         switch (event) {
76         case PM_EVENT_SUSPEND:
77                 return "suspend";
78         case PM_EVENT_RESUME:
79                 return "resume";
80         case PM_EVENT_FREEZE:
81                 return "freeze";
82         case PM_EVENT_QUIESCE:
83                 return "quiesce";
84         case PM_EVENT_HIBERNATE:
85                 return "hibernate";
86         case PM_EVENT_THAW:
87                 return "thaw";
88         case PM_EVENT_RESTORE:
89                 return "restore";
90         case PM_EVENT_RECOVER:
91                 return "recover";
92         default:
93                 return "(unknown PM event)";
94         }
95 }
96
97 /**
98  * device_pm_sleep_init - Initialize system suspend-related device fields.
99  * @dev: Device object being initialized.
100  */
101 void device_pm_sleep_init(struct device *dev)
102 {
103         dev->power.is_prepared = false;
104         dev->power.is_suspended = false;
105         dev->power.is_noirq_suspended = false;
106         dev->power.is_late_suspended = false;
107         init_completion(&dev->power.completion);
108         complete_all(&dev->power.completion);
109         dev->power.wakeup = NULL;
110         INIT_LIST_HEAD(&dev->power.entry);
111 }
112
113 /**
114  * device_pm_lock - Lock the list of active devices used by the PM core.
115  */
116 void device_pm_lock(void)
117 {
118         mutex_lock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_unlock - Unlock the list of active devices used by the PM core.
123  */
124 void device_pm_unlock(void)
125 {
126         mutex_unlock(&dpm_list_mtx);
127 }
128
129 /**
130  * device_pm_add - Add a device to the PM core's list of active devices.
131  * @dev: Device to add to the list.
132  */
133 void device_pm_add(struct device *dev)
134 {
135         pr_debug("PM: Adding info for %s:%s\n",
136                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137         mutex_lock(&dpm_list_mtx);
138         if (dev->parent && dev->parent->power.is_prepared)
139                 dev_warn(dev, "parent %s should not be sleeping\n",
140                         dev_name(dev->parent));
141         list_add_tail(&dev->power.entry, &dpm_list);
142         mutex_unlock(&dpm_list_mtx);
143 }
144
145 /**
146  * device_pm_remove - Remove a device from the PM core's list of active devices.
147  * @dev: Device to be removed from the list.
148  */
149 void device_pm_remove(struct device *dev)
150 {
151         pr_debug("PM: Removing info for %s:%s\n",
152                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
153         complete_all(&dev->power.completion);
154         mutex_lock(&dpm_list_mtx);
155         list_del_init(&dev->power.entry);
156         mutex_unlock(&dpm_list_mtx);
157         device_wakeup_disable(dev);
158         pm_runtime_remove(dev);
159 }
160
161 /**
162  * device_pm_move_before - Move device in the PM core's list of active devices.
163  * @deva: Device to move in dpm_list.
164  * @devb: Device @deva should come before.
165  */
166 void device_pm_move_before(struct device *deva, struct device *devb)
167 {
168         pr_debug("PM: Moving %s:%s before %s:%s\n",
169                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
170                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
171         /* Delete deva from dpm_list and reinsert before devb. */
172         list_move_tail(&deva->power.entry, &devb->power.entry);
173 }
174
175 /**
176  * device_pm_move_after - Move device in the PM core's list of active devices.
177  * @deva: Device to move in dpm_list.
178  * @devb: Device @deva should come after.
179  */
180 void device_pm_move_after(struct device *deva, struct device *devb)
181 {
182         pr_debug("PM: Moving %s:%s after %s:%s\n",
183                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
184                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
185         /* Delete deva from dpm_list and reinsert after devb. */
186         list_move(&deva->power.entry, &devb->power.entry);
187 }
188
189 /**
190  * device_pm_move_last - Move device to end of the PM core's list of devices.
191  * @dev: Device to move in dpm_list.
192  */
193 void device_pm_move_last(struct device *dev)
194 {
195         pr_debug("PM: Moving %s:%s to end of list\n",
196                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
197         list_move_tail(&dev->power.entry, &dpm_list);
198 }
199
200 static ktime_t initcall_debug_start(struct device *dev)
201 {
202         ktime_t calltime = ktime_set(0, 0);
203
204         if (pm_print_times_enabled) {
205                 pr_info("calling  %s+ @ %i, parent: %s\n",
206                         dev_name(dev), task_pid_nr(current),
207                         dev->parent ? dev_name(dev->parent) : "none");
208                 calltime = ktime_get();
209         }
210
211         return calltime;
212 }
213
214 static void initcall_debug_report(struct device *dev, ktime_t calltime,
215                                   int error, pm_message_t state, char *info)
216 {
217         ktime_t rettime;
218         s64 nsecs;
219
220         rettime = ktime_get();
221         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
222
223         if (pm_print_times_enabled) {
224                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
225                         error, (unsigned long long)nsecs >> 10);
226         }
227 }
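/*
 * Editorial note (not part of the original file): pm_print_times_enabled,
 * which gates the two helpers above, is normally toggled from user space by
 * writing to /sys/power/pm_print_times (assuming a kernel built with
 * CONFIG_PM_SLEEP_DEBUG), so that each device callback's duration is logged
 * during suspend and resume.
 */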
228
229 /**
230  * dpm_wait - Wait for a PM operation to complete.
231  * @dev: Device to wait for.
232  * @async: If unset, wait only if the device's power.async_suspend flag is set.
233  */
234 static void dpm_wait(struct device *dev, bool async)
235 {
236         if (!dev)
237                 return;
238
239         if (async || (pm_async_enabled && dev->power.async_suspend))
240                 wait_for_completion(&dev->power.completion);
241 }
242
243 static int dpm_wait_fn(struct device *dev, void *async_ptr)
244 {
245         dpm_wait(dev, *((bool *)async_ptr));
246         return 0;
247 }
248
249 static void dpm_wait_for_children(struct device *dev, bool async)
250 {
251        device_for_each_child(dev, &async, dpm_wait_fn);
252 }
253
254 /**
255  * pm_op - Return the PM operation appropriate for given PM event.
256  * @ops: PM operations to choose from.
257  * @state: PM transition of the system being carried out.
258  */
259 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
260 {
261         switch (state.event) {
262 #ifdef CONFIG_SUSPEND
263         case PM_EVENT_SUSPEND:
264                 return ops->suspend;
265         case PM_EVENT_RESUME:
266                 return ops->resume;
267 #endif /* CONFIG_SUSPEND */
268 #ifdef CONFIG_HIBERNATE_CALLBACKS
269         case PM_EVENT_FREEZE:
270         case PM_EVENT_QUIESCE:
271                 return ops->freeze;
272         case PM_EVENT_HIBERNATE:
273                 return ops->poweroff;
274         case PM_EVENT_THAW:
275         case PM_EVENT_RECOVER:
276                 return ops->thaw;
277                 break;
278         case PM_EVENT_RESTORE:
279                 return ops->restore;
280 #endif /* CONFIG_HIBERNATE_CALLBACKS */
281         }
282
283         return NULL;
284 }
285
286 /**
287  * pm_late_early_op - Return the PM operation appropriate for given PM event.
288  * @ops: PM operations to choose from.
289  * @state: PM transition of the system being carried out.
290  *
291  * Runtime PM is disabled for @dev while this function is being executed.
292  */
293 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
294                                       pm_message_t state)
295 {
296         switch (state.event) {
297 #ifdef CONFIG_SUSPEND
298         case PM_EVENT_SUSPEND:
299                 return ops->suspend_late;
300         case PM_EVENT_RESUME:
301                 return ops->resume_early;
302 #endif /* CONFIG_SUSPEND */
303 #ifdef CONFIG_HIBERNATE_CALLBACKS
304         case PM_EVENT_FREEZE:
305         case PM_EVENT_QUIESCE:
306                 return ops->freeze_late;
307         case PM_EVENT_HIBERNATE:
308                 return ops->poweroff_late;
309         case PM_EVENT_THAW:
310         case PM_EVENT_RECOVER:
311                 return ops->thaw_early;
312         case PM_EVENT_RESTORE:
313                 return ops->restore_early;
314 #endif /* CONFIG_HIBERNATE_CALLBACKS */
315         }
316
317         return NULL;
318 }
319
320 /**
321  * pm_noirq_op - Return the PM operation appropriate for given PM event.
322  * @ops: PM operations to choose from.
323  * @state: PM transition of the system being carried out.
324  *
325  * The driver of @dev will not receive interrupts while this function is being
326  * executed.
327  */
328 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
329 {
330         switch (state.event) {
331 #ifdef CONFIG_SUSPEND
332         case PM_EVENT_SUSPEND:
333                 return ops->suspend_noirq;
334         case PM_EVENT_RESUME:
335                 return ops->resume_noirq;
336 #endif /* CONFIG_SUSPEND */
337 #ifdef CONFIG_HIBERNATE_CALLBACKS
338         case PM_EVENT_FREEZE:
339         case PM_EVENT_QUIESCE:
340                 return ops->freeze_noirq;
341         case PM_EVENT_HIBERNATE:
342                 return ops->poweroff_noirq;
343         case PM_EVENT_THAW:
344         case PM_EVENT_RECOVER:
345                 return ops->thaw_noirq;
346         case PM_EVENT_RESTORE:
347                 return ops->restore_noirq;
348 #endif /* CONFIG_HIBERNATE_CALLBACKS */
349         }
350
351         return NULL;
352 }
353
354 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
355 {
356         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
357                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
358                 ", may wakeup" : "");
359 }
360
361 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
362                         int error)
363 {
364         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
365                 dev_name(dev), pm_verb(state.event), info, error);
366 }
367
368 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
369 {
370         ktime_t calltime;
371         u64 usecs64;
372         int usecs;
373
374         calltime = ktime_get();
375         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
376         do_div(usecs64, NSEC_PER_USEC);
377         usecs = usecs64;
378         if (usecs == 0)
379                 usecs = 1;
380         pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
381                 info ?: "", info ? " " : "", pm_verb(state.event),
382                 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
383 }
384
385 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
386                             pm_message_t state, char *info)
387 {
388         ktime_t calltime;
389         int error;
390
391         if (!cb)
392                 return 0;
393
394         calltime = initcall_debug_start(dev);
395
396         pm_dev_dbg(dev, state, info);
397         trace_device_pm_callback_start(dev, info, state.event);
398         error = cb(dev);
399         trace_device_pm_callback_end(dev, error);
400         suspend_report_result(cb, error);
401
402         initcall_debug_report(dev, calltime, error, state, info);
403
404         return error;
405 }
406
407 #ifdef CONFIG_DPM_WATCHDOG
408 struct dpm_watchdog {
409         struct device           *dev;
410         struct task_struct      *tsk;
411         struct timer_list       timer;
412 };
413
414 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
415         struct dpm_watchdog wd
416
417 /**
418  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
419  * @data: Watchdog object address.
420  *
421  * Called when a driver has timed out suspending or resuming.
422  * There's not much we can do here to recover, so panic() to
423  * capture a crash-dump in pstore.
424  */
425 static void dpm_watchdog_handler(unsigned long data)
426 {
427         struct dpm_watchdog *wd = (void *)data;
428
429         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
430         show_stack(wd->tsk, NULL);
431         panic("%s %s: unrecoverable failure\n",
432                 dev_driver_string(wd->dev), dev_name(wd->dev));
433 }
434
435 /**
436  * dpm_watchdog_set - Enable pm watchdog for given device.
437  * @wd: Watchdog. Must be allocated on the stack.
438  * @dev: Device to handle.
439  */
440 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
441 {
442         struct timer_list *timer = &wd->timer;
443
444         wd->dev = dev;
445         wd->tsk = current;
446
447         init_timer_on_stack(timer);
448         /* use same timeout value for both suspend and resume */
449         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
450         timer->function = dpm_watchdog_handler;
451         timer->data = (unsigned long)wd;
452         add_timer(timer);
453 }
454
455 /**
456  * dpm_watchdog_clear - Disable suspend/resume watchdog.
457  * @wd: Watchdog to disable.
458  */
459 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
460 {
461         struct timer_list *timer = &wd->timer;
462
463         del_timer_sync(timer);
464         destroy_timer_on_stack(timer);
465 }
466 #else
467 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
468 #define dpm_watchdog_set(x, y)
469 #define dpm_watchdog_clear(x)
470 #endif
471
472 /*------------------------- Resume routines -------------------------*/
473
474 /**
475  * device_resume_noirq - Execute a "noirq resume" callback for given device.
476  * @dev: Device to handle.
477  * @state: PM transition of the system being carried out.
478  * @async: If true, the device is being resumed asynchronously.
479  *
480  * The driver of @dev will not receive interrupts while this function is being
481  * executed.
482  */
483 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
484 {
485         pm_callback_t callback = NULL;
486         char *info = NULL;
487         int error = 0;
488
489         TRACE_DEVICE(dev);
490         TRACE_RESUME(0);
491
492         if (dev->power.syscore || dev->power.direct_complete)
493                 goto Out;
494
495         if (!dev->power.is_noirq_suspended)
496                 goto Out;
497
498         dpm_wait(dev->parent, async);
499
500         if (dev->pm_domain) {
501                 info = "noirq power domain ";
502                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
503         } else if (dev->type && dev->type->pm) {
504                 info = "noirq type ";
505                 callback = pm_noirq_op(dev->type->pm, state);
506         } else if (dev->class && dev->class->pm) {
507                 info = "noirq class ";
508                 callback = pm_noirq_op(dev->class->pm, state);
509         } else if (dev->bus && dev->bus->pm) {
510                 info = "noirq bus ";
511                 callback = pm_noirq_op(dev->bus->pm, state);
512         }
513
514         if (!callback && dev->driver && dev->driver->pm) {
515                 info = "noirq driver ";
516                 callback = pm_noirq_op(dev->driver->pm, state);
517         }
518
519         error = dpm_run_callback(callback, dev, state, info);
520         dev->power.is_noirq_suspended = false;
521
522  Out:
523         complete_all(&dev->power.completion);
524         TRACE_RESUME(error);
525         return error;
526 }
527
528 static bool is_async(struct device *dev)
529 {
530         return dev->power.async_suspend && pm_async_enabled
531                 && !pm_trace_is_enabled();
532 }
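/*
 * Illustrative sketch, not part of the original file: both conditions that
 * is_async() checks are controlled outside this file.  pm_async_enabled is
 * typically toggled via /sys/power/pm_async, and a driver opts a device into
 * the async path with device_enable_async_suspend().  The probe routine and
 * device below are hypothetical.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
        /* Let the PM core suspend/resume this device from an async thread. */
        device_enable_async_suspend(&pdev->dev);
        return 0;
}
#endif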
533
534 static void async_resume_noirq(void *data, async_cookie_t cookie)
535 {
536         struct device *dev = (struct device *)data;
537         int error;
538
539         error = device_resume_noirq(dev, pm_transition, true);
540         if (error)
541                 pm_dev_err(dev, pm_transition, " async", error);
542
543         put_device(dev);
544 }
545
546 /**
547  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
548  * @state: PM transition of the system being carried out.
549  *
550  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
551  * enable device drivers to receive interrupts.
552  */
553 void dpm_resume_noirq(pm_message_t state)
554 {
555         struct device *dev;
556         ktime_t starttime = ktime_get();
557
558         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
559         mutex_lock(&dpm_list_mtx);
560         pm_transition = state;
561
562         /*
563          * Advance the async threads upfront,
564          * in case the starting of async threads is
565          * delayed by non-async resuming devices.
566          */
567         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
568                 reinit_completion(&dev->power.completion);
569                 if (is_async(dev)) {
570                         get_device(dev);
571                         async_schedule(async_resume_noirq, dev);
572                 }
573         }
574
575         while (!list_empty(&dpm_noirq_list)) {
576                 dev = to_device(dpm_noirq_list.next);
577                 get_device(dev);
578                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
579                 mutex_unlock(&dpm_list_mtx);
580
581                 if (!is_async(dev)) {
582                         int error;
583
584                         error = device_resume_noirq(dev, state, false);
585                         if (error) {
586                                 suspend_stats.failed_resume_noirq++;
587                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
588                                 dpm_save_failed_dev(dev_name(dev));
589                                 pm_dev_err(dev, state, " noirq", error);
590                         }
591                 }
592
593                 mutex_lock(&dpm_list_mtx);
594                 put_device(dev);
595         }
596         mutex_unlock(&dpm_list_mtx);
597         async_synchronize_full();
598         dpm_show_time(starttime, state, "noirq");
599         resume_device_irqs();
600         device_wakeup_disarm_wake_irqs();
601         cpuidle_resume();
602         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
603 }
604
605 /**
606  * device_resume_early - Execute an "early resume" callback for given device.
607  * @dev: Device to handle.
608  * @state: PM transition of the system being carried out.
609  * @async: If true, the device is being resumed asynchronously.
610  *
611  * Runtime PM is disabled for @dev while this function is being executed.
612  */
613 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
614 {
615         pm_callback_t callback = NULL;
616         char *info = NULL;
617         int error = 0;
618
619         TRACE_DEVICE(dev);
620         TRACE_RESUME(0);
621
622         if (dev->power.syscore || dev->power.direct_complete)
623                 goto Out;
624
625         if (!dev->power.is_late_suspended)
626                 goto Out;
627
628         dpm_wait(dev->parent, async);
629
630         if (dev->pm_domain) {
631                 info = "early power domain ";
632                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
633         } else if (dev->type && dev->type->pm) {
634                 info = "early type ";
635                 callback = pm_late_early_op(dev->type->pm, state);
636         } else if (dev->class && dev->class->pm) {
637                 info = "early class ";
638                 callback = pm_late_early_op(dev->class->pm, state);
639         } else if (dev->bus && dev->bus->pm) {
640                 info = "early bus ";
641                 callback = pm_late_early_op(dev->bus->pm, state);
642         }
643
644         if (!callback && dev->driver && dev->driver->pm) {
645                 info = "early driver ";
646                 callback = pm_late_early_op(dev->driver->pm, state);
647         }
648
649         error = dpm_run_callback(callback, dev, state, info);
650         dev->power.is_late_suspended = false;
651
652  Out:
653         TRACE_RESUME(error);
654
655         pm_runtime_enable(dev);
656         complete_all(&dev->power.completion);
657         return error;
658 }
659
660 static void async_resume_early(void *data, async_cookie_t cookie)
661 {
662         struct device *dev = (struct device *)data;
663         int error;
664
665         error = device_resume_early(dev, pm_transition, true);
666         if (error)
667                 pm_dev_err(dev, pm_transition, " async", error);
668
669         put_device(dev);
670 }
671
672 /**
673  * dpm_resume_early - Execute "early resume" callbacks for all devices.
674  * @state: PM transition of the system being carried out.
675  */
676 void dpm_resume_early(pm_message_t state)
677 {
678         struct device *dev;
679         ktime_t starttime = ktime_get();
680
681         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
682         mutex_lock(&dpm_list_mtx);
683         pm_transition = state;
684
685         /*
686          * Advance the async threads upfront,
687          * in case the starting of async threads is
688          * delayed by non-async resuming devices.
689          */
690         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
691                 reinit_completion(&dev->power.completion);
692                 if (is_async(dev)) {
693                         get_device(dev);
694                         async_schedule(async_resume_early, dev);
695                 }
696         }
697
698         while (!list_empty(&dpm_late_early_list)) {
699                 dev = to_device(dpm_late_early_list.next);
700                 get_device(dev);
701                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
702                 mutex_unlock(&dpm_list_mtx);
703
704                 if (!is_async(dev)) {
705                         int error;
706
707                         error = device_resume_early(dev, state, false);
708                         if (error) {
709                                 suspend_stats.failed_resume_early++;
710                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
711                                 dpm_save_failed_dev(dev_name(dev));
712                                 pm_dev_err(dev, state, " early", error);
713                         }
714                 }
715                 mutex_lock(&dpm_list_mtx);
716                 put_device(dev);
717         }
718         mutex_unlock(&dpm_list_mtx);
719         async_synchronize_full();
720         dpm_show_time(starttime, state, "early");
721         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
722 }
723
724 /**
725  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
726  * @state: PM transition of the system being carried out.
727  */
728 void dpm_resume_start(pm_message_t state)
729 {
730         dpm_resume_noirq(state);
731         dpm_resume_early(state);
732 }
733 EXPORT_SYMBOL_GPL(dpm_resume_start);
734
735 /**
736  * device_resume - Execute "resume" callbacks for given device.
737  * @dev: Device to handle.
738  * @state: PM transition of the system being carried out.
739  * @async: If true, the device is being resumed asynchronously.
740  */
741 static int device_resume(struct device *dev, pm_message_t state, bool async)
742 {
743         pm_callback_t callback = NULL;
744         char *info = NULL;
745         int error = 0;
746         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
747
748         TRACE_DEVICE(dev);
749         TRACE_RESUME(0);
750
751         if (dev->power.syscore)
752                 goto Complete;
753
754         if (dev->power.direct_complete) {
755                 /* Match the pm_runtime_disable() in __device_suspend(). */
756                 pm_runtime_enable(dev);
757                 goto Complete;
758         }
759
760         dpm_wait(dev->parent, async);
761         dpm_watchdog_set(&wd, dev);
762         device_lock(dev);
763
764         /*
765          * This is a fib.  But we'll allow new children to be added below
766          * a resumed device, even if the device hasn't been completed yet.
767          */
768         dev->power.is_prepared = false;
769
770         if (!dev->power.is_suspended)
771                 goto Unlock;
772
773         if (dev->pm_domain) {
774                 info = "power domain ";
775                 callback = pm_op(&dev->pm_domain->ops, state);
776                 goto Driver;
777         }
778
779         if (dev->type && dev->type->pm) {
780                 info = "type ";
781                 callback = pm_op(dev->type->pm, state);
782                 goto Driver;
783         }
784
785         if (dev->class) {
786                 if (dev->class->pm) {
787                         info = "class ";
788                         callback = pm_op(dev->class->pm, state);
789                         goto Driver;
790                 } else if (dev->class->resume) {
791                         info = "legacy class ";
792                         callback = dev->class->resume;
793                         goto End;
794                 }
795         }
796
797         if (dev->bus) {
798                 if (dev->bus->pm) {
799                         info = "bus ";
800                         callback = pm_op(dev->bus->pm, state);
801                 } else if (dev->bus->resume) {
802                         info = "legacy bus ";
803                         callback = dev->bus->resume;
804                         goto End;
805                 }
806         }
807
808  Driver:
809         if (!callback && dev->driver && dev->driver->pm) {
810                 info = "driver ";
811                 callback = pm_op(dev->driver->pm, state);
812         }
813
814  End:
815         error = dpm_run_callback(callback, dev, state, info);
816         dev->power.is_suspended = false;
817
818  Unlock:
819         device_unlock(dev);
820         dpm_watchdog_clear(&wd);
821
822  Complete:
823         complete_all(&dev->power.completion);
824
825         TRACE_RESUME(error);
826
827         return error;
828 }
829
830 static void async_resume(void *data, async_cookie_t cookie)
831 {
832         struct device *dev = (struct device *)data;
833         int error;
834
835         error = device_resume(dev, pm_transition, true);
836         if (error)
837                 pm_dev_err(dev, pm_transition, " async", error);
838         put_device(dev);
839 }
840
841 /**
842  * dpm_drv_timeout - Driver suspend / resume watchdog handler.
843  * @data: Address of the struct dpm_drv_wd_data for the device that timed out.
844  *
845  * Called when a driver has timed out suspending or resuming.
846  * There's not much we can do here to recover, so BUG() out for
847  * a crash-dump.
848  *
849  */
850 static void dpm_drv_timeout(unsigned long data)
851 {
852         struct dpm_drv_wd_data *wd_data = (void *)data;
853         struct device *dev = wd_data->dev;
854         struct task_struct *tsk = wd_data->tsk;
855
856         printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
857                (dev->driver ? dev->driver->name : "no driver"));
858
859         printk(KERN_EMERG "dpm suspend stack:\n");
860         show_stack(tsk, NULL);
861
862         BUG();
863 }
864
865 /**
866  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
867  * @state: PM transition of the system being carried out.
868  *
869  * Execute the appropriate "resume" callback for all devices whose status
870  * indicates that they are suspended.
871  */
872 void dpm_resume(pm_message_t state)
873 {
874         struct device *dev;
875         ktime_t starttime = ktime_get();
876
877         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
878         might_sleep();
879
880         mutex_lock(&dpm_list_mtx);
881         pm_transition = state;
882         async_error = 0;
883
884         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
885                 reinit_completion(&dev->power.completion);
886                 if (is_async(dev)) {
887                         get_device(dev);
888                         async_schedule(async_resume, dev);
889                 }
890         }
891
892         while (!list_empty(&dpm_suspended_list)) {
893                 dev = to_device(dpm_suspended_list.next);
894                 get_device(dev);
895                 if (!is_async(dev)) {
896                         int error;
897
898                         mutex_unlock(&dpm_list_mtx);
899
900                         error = device_resume(dev, state, false);
901                         if (error) {
902                                 suspend_stats.failed_resume++;
903                                 dpm_save_failed_step(SUSPEND_RESUME);
904                                 dpm_save_failed_dev(dev_name(dev));
905                                 pm_dev_err(dev, state, "", error);
906                         }
907
908                         mutex_lock(&dpm_list_mtx);
909                 }
910                 if (!list_empty(&dev->power.entry))
911                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
912                 put_device(dev);
913         }
914         mutex_unlock(&dpm_list_mtx);
915         async_synchronize_full();
916         dpm_show_time(starttime, state, NULL);
917
918         cpufreq_resume();
919         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
920 }
921
922 /**
923  * device_complete - Complete a PM transition for given device.
924  * @dev: Device to handle.
925  * @state: PM transition of the system being carried out.
926  */
927 static void device_complete(struct device *dev, pm_message_t state)
928 {
929         void (*callback)(struct device *) = NULL;
930         char *info = NULL;
931
932         if (dev->power.syscore)
933                 return;
934
935         device_lock(dev);
936
937         if (dev->pm_domain) {
938                 info = "completing power domain ";
939                 callback = dev->pm_domain->ops.complete;
940         } else if (dev->type && dev->type->pm) {
941                 info = "completing type ";
942                 callback = dev->type->pm->complete;
943         } else if (dev->class && dev->class->pm) {
944                 info = "completing class ";
945                 callback = dev->class->pm->complete;
946         } else if (dev->bus && dev->bus->pm) {
947                 info = "completing bus ";
948                 callback = dev->bus->pm->complete;
949         }
950
951         if (!callback && dev->driver && dev->driver->pm) {
952                 info = "completing driver ";
953                 callback = dev->driver->pm->complete;
954         }
955
956         if (callback) {
957                 pm_dev_dbg(dev, state, info);
958                 callback(dev);
959         }
960
961         device_unlock(dev);
962
963         pm_runtime_put(dev);
964 }
965
966 /**
967  * dpm_complete - Complete a PM transition for all non-sysdev devices.
968  * @state: PM transition of the system being carried out.
969  *
970  * Execute the ->complete() callbacks for all devices whose PM status is not
971  * DPM_ON (this allows new devices to be registered).
972  */
973 void dpm_complete(pm_message_t state)
974 {
975         struct list_head list;
976
977         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
978         might_sleep();
979
980         INIT_LIST_HEAD(&list);
981         mutex_lock(&dpm_list_mtx);
982         while (!list_empty(&dpm_prepared_list)) {
983                 struct device *dev = to_device(dpm_prepared_list.prev);
984
985                 get_device(dev);
986                 dev->power.is_prepared = false;
987                 list_move(&dev->power.entry, &list);
988                 mutex_unlock(&dpm_list_mtx);
989
990                 trace_device_pm_callback_start(dev, "", state.event);
991                 device_complete(dev, state);
992                 trace_device_pm_callback_end(dev, 0);
993
994                 mutex_lock(&dpm_list_mtx);
995                 put_device(dev);
996         }
997         list_splice(&list, &dpm_list);
998         mutex_unlock(&dpm_list_mtx);
999         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1000 }
1001
1002 /**
1003  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1004  * @state: PM transition of the system being carried out.
1005  *
1006  * Execute "resume" callbacks for all devices and complete the PM transition of
1007  * the system.
1008  */
1009 void dpm_resume_end(pm_message_t state)
1010 {
1011         dpm_resume(state);
1012         dpm_complete(state);
1013 }
1014 EXPORT_SYMBOL_GPL(dpm_resume_end);
1015
1016
1017 /*------------------------- Suspend routines -------------------------*/
1018
1019 /**
1020  * resume_event - Return a "resume" message for given "suspend" sleep state.
1021  * @sleep_state: PM message representing a sleep state.
1022  *
1023  * Return a PM message representing the resume event corresponding to given
1024  * sleep state.
1025  */
1026 static pm_message_t resume_event(pm_message_t sleep_state)
1027 {
1028         switch (sleep_state.event) {
1029         case PM_EVENT_SUSPEND:
1030                 return PMSG_RESUME;
1031         case PM_EVENT_FREEZE:
1032         case PM_EVENT_QUIESCE:
1033                 return PMSG_RECOVER;
1034         case PM_EVENT_HIBERNATE:
1035                 return PMSG_RESTORE;
1036         }
1037         return PMSG_ON;
1038 }
1039
1040 /**
1041  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1042  * @dev: Device to handle.
1043  * @state: PM transition of the system being carried out.
1044  * @async: If true, the device is being suspended asynchronously.
1045  *
1046  * The driver of @dev will not receive interrupts while this function is being
1047  * executed.
1048  */
1049 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1050 {
1051         pm_callback_t callback = NULL;
1052         char *info = NULL;
1053         int error = 0;
1054
1055         TRACE_DEVICE(dev);
1056         TRACE_SUSPEND(0);
1057
1058         if (async_error)
1059                 goto Complete;
1060
1061         if (pm_wakeup_pending()) {
1062                 async_error = -EBUSY;
1063                 goto Complete;
1064         }
1065
1066         if (dev->power.syscore || dev->power.direct_complete)
1067                 goto Complete;
1068
1069         dpm_wait_for_children(dev, async);
1070
1071         if (dev->pm_domain) {
1072                 info = "noirq power domain ";
1073                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1074         } else if (dev->type && dev->type->pm) {
1075                 info = "noirq type ";
1076                 callback = pm_noirq_op(dev->type->pm, state);
1077         } else if (dev->class && dev->class->pm) {
1078                 info = "noirq class ";
1079                 callback = pm_noirq_op(dev->class->pm, state);
1080         } else if (dev->bus && dev->bus->pm) {
1081                 info = "noirq bus ";
1082                 callback = pm_noirq_op(dev->bus->pm, state);
1083         }
1084
1085         if (!callback && dev->driver && dev->driver->pm) {
1086                 info = "noirq driver ";
1087                 callback = pm_noirq_op(dev->driver->pm, state);
1088         }
1089
1090         error = dpm_run_callback(callback, dev, state, info);
1091         if (!error)
1092                 dev->power.is_noirq_suspended = true;
1093         else
1094                 async_error = error;
1095
1096 Complete:
1097         complete_all(&dev->power.completion);
1098         TRACE_SUSPEND(error);
1099         return error;
1100 }
1101
1102 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1103 {
1104         struct device *dev = (struct device *)data;
1105         int error;
1106
1107         error = __device_suspend_noirq(dev, pm_transition, true);
1108         if (error) {
1109                 dpm_save_failed_dev(dev_name(dev));
1110                 pm_dev_err(dev, pm_transition, " async", error);
1111         }
1112
1113         put_device(dev);
1114 }
1115
1116 static int device_suspend_noirq(struct device *dev)
1117 {
1118         reinit_completion(&dev->power.completion);
1119
1120         if (is_async(dev)) {
1121                 get_device(dev);
1122                 async_schedule(async_suspend_noirq, dev);
1123                 return 0;
1124         }
1125         return __device_suspend_noirq(dev, pm_transition, false);
1126 }
1127
1128 /**
1129  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1130  * @state: PM transition of the system being carried out.
1131  *
1132  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1133  * handlers for all non-sysdev devices.
1134  */
1135 int dpm_suspend_noirq(pm_message_t state)
1136 {
1137         ktime_t starttime = ktime_get();
1138         int error = 0;
1139
1140         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1141         cpuidle_pause();
1142         device_wakeup_arm_wake_irqs();
1143         suspend_device_irqs();
1144         mutex_lock(&dpm_list_mtx);
1145         pm_transition = state;
1146         async_error = 0;
1147
1148         while (!list_empty(&dpm_late_early_list)) {
1149                 struct device *dev = to_device(dpm_late_early_list.prev);
1150
1151                 get_device(dev);
1152                 mutex_unlock(&dpm_list_mtx);
1153
1154                 error = device_suspend_noirq(dev);
1155
1156                 mutex_lock(&dpm_list_mtx);
1157                 if (error) {
1158                         pm_dev_err(dev, state, " noirq", error);
1159                         dpm_save_failed_dev(dev_name(dev));
1160                         put_device(dev);
1161                         break;
1162                 }
1163                 if (!list_empty(&dev->power.entry))
1164                         list_move(&dev->power.entry, &dpm_noirq_list);
1165                 put_device(dev);
1166
1167                 if (async_error)
1168                         break;
1169         }
1170         mutex_unlock(&dpm_list_mtx);
1171         async_synchronize_full();
1172         if (!error)
1173                 error = async_error;
1174
1175         if (error) {
1176                 suspend_stats.failed_suspend_noirq++;
1177                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1178                 dpm_resume_noirq(resume_event(state));
1179         } else {
1180                 dpm_show_time(starttime, state, "noirq");
1181         }
1182         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1183         return error;
1184 }
1185
1186 /**
1187  * device_suspend_late - Execute a "late suspend" callback for given device.
1188  * @dev: Device to handle.
1189  * @state: PM transition of the system being carried out.
1190  * @async: If true, the device is being suspended asynchronously.
1191  *
1192  * Runtime PM is disabled for @dev while this function is being executed.
1193  */
1194 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1195 {
1196         pm_callback_t callback = NULL;
1197         char *info = NULL;
1198         int error = 0;
1199
1200         TRACE_DEVICE(dev);
1201         TRACE_SUSPEND(0);
1202
1203         __pm_runtime_disable(dev, false);
1204
1205         if (async_error)
1206                 goto Complete;
1207
1208         if (pm_wakeup_pending()) {
1209                 async_error = -EBUSY;
1210                 goto Complete;
1211         }
1212
1213         if (dev->power.syscore || dev->power.direct_complete)
1214                 goto Complete;
1215
1216         dpm_wait_for_children(dev, async);
1217
1218         if (dev->pm_domain) {
1219                 info = "late power domain ";
1220                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1221         } else if (dev->type && dev->type->pm) {
1222                 info = "late type ";
1223                 callback = pm_late_early_op(dev->type->pm, state);
1224         } else if (dev->class && dev->class->pm) {
1225                 info = "late class ";
1226                 callback = pm_late_early_op(dev->class->pm, state);
1227         } else if (dev->bus && dev->bus->pm) {
1228                 info = "late bus ";
1229                 callback = pm_late_early_op(dev->bus->pm, state);
1230         }
1231
1232         if (!callback && dev->driver && dev->driver->pm) {
1233                 info = "late driver ";
1234                 callback = pm_late_early_op(dev->driver->pm, state);
1235         }
1236
1237         error = dpm_run_callback(callback, dev, state, info);
1238         if (!error)
1239                 dev->power.is_late_suspended = true;
1240         else
1241                 async_error = error;
1242
1243 Complete:
1244         TRACE_SUSPEND(error);
1245         complete_all(&dev->power.completion);
1246         return error;
1247 }
1248
1249 static void async_suspend_late(void *data, async_cookie_t cookie)
1250 {
1251         struct device *dev = (struct device *)data;
1252         int error;
1253
1254         error = __device_suspend_late(dev, pm_transition, true);
1255         if (error) {
1256                 dpm_save_failed_dev(dev_name(dev));
1257                 pm_dev_err(dev, pm_transition, " async", error);
1258         }
1259         put_device(dev);
1260 }
1261
1262 static int device_suspend_late(struct device *dev)
1263 {
1264         reinit_completion(&dev->power.completion);
1265
1266         if (is_async(dev)) {
1267                 get_device(dev);
1268                 async_schedule(async_suspend_late, dev);
1269                 return 0;
1270         }
1271
1272         return __device_suspend_late(dev, pm_transition, false);
1273 }
1274
1275 /**
1276  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1277  * @state: PM transition of the system being carried out.
1278  */
1279 int dpm_suspend_late(pm_message_t state)
1280 {
1281         ktime_t starttime = ktime_get();
1282         int error = 0;
1283
1284         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1285         mutex_lock(&dpm_list_mtx);
1286         pm_transition = state;
1287         async_error = 0;
1288
1289         while (!list_empty(&dpm_suspended_list)) {
1290                 struct device *dev = to_device(dpm_suspended_list.prev);
1291
1292                 get_device(dev);
1293                 mutex_unlock(&dpm_list_mtx);
1294
1295                 error = device_suspend_late(dev);
1296
1297                 mutex_lock(&dpm_list_mtx);
1298                 if (error) {
1299                         pm_dev_err(dev, state, " late", error);
1300                         dpm_save_failed_dev(dev_name(dev));
1301                         put_device(dev);
1302                         break;
1303                 }
1304                 if (!list_empty(&dev->power.entry))
1305                         list_move(&dev->power.entry, &dpm_late_early_list);
1306                 put_device(dev);
1307
1308                 if (async_error)
1309                         break;
1310         }
1311         mutex_unlock(&dpm_list_mtx);
1312         async_synchronize_full();
1313         if (!error)
1314                 error = async_error;
1315         if (error) {
1316                 suspend_stats.failed_suspend_late++;
1317                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1318                 dpm_resume_early(resume_event(state));
1319         } else {
1320                 dpm_show_time(starttime, state, "late");
1321         }
1322         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1323         return error;
1324 }
1325
1326 /**
1327  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1328  * @state: PM transition of the system being carried out.
1329  */
1330 int dpm_suspend_end(pm_message_t state)
1331 {
1332         int error = dpm_suspend_late(state);
1333         if (error)
1334                 return error;
1335
1336         error = dpm_suspend_noirq(state);
1337         if (error) {
1338                 dpm_resume_early(resume_event(state));
1339                 return error;
1340         }
1341
1342         return 0;
1343 }
1344 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1345
1346 /**
1347  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1348  * @dev: Device to suspend.
1349  * @state: PM transition of the system being carried out.
1350  * @cb: Suspend callback to execute.
1351  * @info: string description of caller.
1352  */
1353 static int legacy_suspend(struct device *dev, pm_message_t state,
1354                           int (*cb)(struct device *dev, pm_message_t state),
1355                           char *info)
1356 {
1357         int error;
1358         ktime_t calltime;
1359
1360         calltime = initcall_debug_start(dev);
1361
1362         trace_device_pm_callback_start(dev, info, state.event);
1363         error = cb(dev, state);
1364         trace_device_pm_callback_end(dev, error);
1365         suspend_report_result(cb, error);
1366
1367         initcall_debug_report(dev, calltime, error, state, info);
1368
1369         return error;
1370 }
1371
1372 /**
1373  * device_suspend - Execute "suspend" callbacks for given device.
1374  * @dev: Device to handle.
1375  * @state: PM transition of the system being carried out.
1376  * @async: If true, the device is being suspended asynchronously.
1377  */
1378 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1379 {
1380         pm_callback_t callback = NULL;
1381         char *info = NULL;
1382         int error = 0;
1383         struct timer_list timer;
1384         struct dpm_drv_wd_data data;
1385         char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1386         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1387
1388         TRACE_DEVICE(dev);
1389         TRACE_SUSPEND(0);
1390
1391         dpm_wait_for_children(dev, async);
1392
1393         if (async_error)
1394                 goto Complete;
1395
1396         /*
1397          * If a device configured to wake up the system from sleep states
1398          * has been suspended at run time and there's a resume request pending
1399          * for it, this is equivalent to the device signaling wakeup, so the
1400          * system suspend operation should be aborted.
1401          */
1402         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1403                 pm_wakeup_event(dev, 0);
1404
1405         if (pm_wakeup_pending()) {
1406                 pm_get_active_wakeup_sources(suspend_abort,
1407                         MAX_SUSPEND_ABORT_LEN);
1408                 log_suspend_abort_reason(suspend_abort);
1409                 async_error = -EBUSY;
1410                 goto Complete;
1411         }
1412
1413         if (dev->power.syscore)
1414                 goto Complete;
1415
1416         data.dev = dev;
1417         data.tsk = current;
1418         init_timer_on_stack(&timer);
1419         timer.expires = jiffies + HZ * 12;
1420         timer.function = dpm_drv_timeout;
1421         timer.data = (unsigned long)&data;
1422         add_timer(&timer);
1423
1424         if (dev->power.direct_complete) {
1425                 if (pm_runtime_status_suspended(dev)) {
1426                         pm_runtime_disable(dev);
1427                         if (pm_runtime_status_suspended(dev))
1428                                 goto Complete;
1429
1430                         pm_runtime_enable(dev);
1431                 }
1432                 dev->power.direct_complete = false;
1433         }
1434
1435         dpm_watchdog_set(&wd, dev);
1436         device_lock(dev);
1437
1438         if (dev->pm_domain) {
1439                 info = "power domain ";
1440                 callback = pm_op(&dev->pm_domain->ops, state);
1441                 goto Run;
1442         }
1443
1444         if (dev->type && dev->type->pm) {
1445                 info = "type ";
1446                 callback = pm_op(dev->type->pm, state);
1447                 goto Run;
1448         }
1449
1450         if (dev->class) {
1451                 if (dev->class->pm) {
1452                         info = "class ";
1453                         callback = pm_op(dev->class->pm, state);
1454                         goto Run;
1455                 } else if (dev->class->suspend) {
1456                         pm_dev_dbg(dev, state, "legacy class ");
1457                         error = legacy_suspend(dev, state, dev->class->suspend,
1458                                                 "legacy class ");
1459                         goto End;
1460                 }
1461         }
1462
1463         if (dev->bus) {
1464                 if (dev->bus->pm) {
1465                         info = "bus ";
1466                         callback = pm_op(dev->bus->pm, state);
1467                 } else if (dev->bus->suspend) {
1468                         pm_dev_dbg(dev, state, "legacy bus ");
1469                         error = legacy_suspend(dev, state, dev->bus->suspend,
1470                                                 "legacy bus ");
1471                         goto End;
1472                 }
1473         }
1474
1475  Run:
1476         if (!callback && dev->driver && dev->driver->pm) {
1477                 info = "driver ";
1478                 callback = pm_op(dev->driver->pm, state);
1479         }
1480
1481         error = dpm_run_callback(callback, dev, state, info);
1482
1483  End:
1484         if (!error) {
1485                 struct device *parent = dev->parent;
1486
1487                 dev->power.is_suspended = true;
1488                 if (parent) {
1489                         spin_lock_irq(&parent->power.lock);
1490
1491                         dev->parent->power.direct_complete = false;
1492                         if (dev->power.wakeup_path
1493                             && !dev->parent->power.ignore_children)
1494                                 dev->parent->power.wakeup_path = true;
1495
1496                         spin_unlock_irq(&parent->power.lock);
1497                 }
1498         }
1499
1500         device_unlock(dev);
1501         dpm_watchdog_clear(&wd);
1502
1503         del_timer_sync(&timer);
1504         destroy_timer_on_stack(&timer);
1505
1506  Complete:
1507         complete_all(&dev->power.completion);
1508         if (error)
1509                 async_error = error;
1510
1511         TRACE_SUSPEND(error);
1512         return error;
1513 }
1514
1515 static void async_suspend(void *data, async_cookie_t cookie)
1516 {
1517         struct device *dev = (struct device *)data;
1518         int error;
1519
1520         error = __device_suspend(dev, pm_transition, true);
1521         if (error) {
1522                 dpm_save_failed_dev(dev_name(dev));
1523                 pm_dev_err(dev, pm_transition, " async", error);
1524         }
1525
1526         put_device(dev);
1527 }
1528
1529 static int device_suspend(struct device *dev)
1530 {
1531         reinit_completion(&dev->power.completion);
1532
1533         if (is_async(dev)) {
1534                 get_device(dev);
1535                 async_schedule(async_suspend, dev);
1536                 return 0;
1537         }
1538
1539         return __device_suspend(dev, pm_transition, false);
1540 }
1541
1542 /**
1543  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1544  * @state: PM transition of the system being carried out.
1545  */
1546 int dpm_suspend(pm_message_t state)
1547 {
1548         ktime_t starttime = ktime_get();
1549         int error = 0;
1550
1551         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1552         might_sleep();
1553
1554         cpufreq_suspend();
1555
1556         mutex_lock(&dpm_list_mtx);
1557         pm_transition = state;
1558         async_error = 0;
1559         while (!list_empty(&dpm_prepared_list)) {
1560                 struct device *dev = to_device(dpm_prepared_list.prev);
1561
1562                 get_device(dev);
1563                 mutex_unlock(&dpm_list_mtx);
1564
1565                 error = device_suspend(dev);
1566
1567                 mutex_lock(&dpm_list_mtx);
1568                 if (error) {
1569                         pm_dev_err(dev, state, "", error);
1570                         dpm_save_failed_dev(dev_name(dev));
1571                         put_device(dev);
1572                         break;
1573                 }
1574                 if (!list_empty(&dev->power.entry))
1575                         list_move(&dev->power.entry, &dpm_suspended_list);
1576                 put_device(dev);
1577                 if (async_error)
1578                         break;
1579         }
1580         mutex_unlock(&dpm_list_mtx);
1581         async_synchronize_full();
1582         if (!error)
1583                 error = async_error;
1584         if (error) {
1585                 suspend_stats.failed_suspend++;
1586                 dpm_save_failed_step(SUSPEND_SUSPEND);
1587         } else
1588                 dpm_show_time(starttime, state, NULL);
1589         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1590         return error;
1591 }
1592
1593 /**
1594  * device_prepare - Prepare a device for system power transition.
1595  * @dev: Device to handle.
1596  * @state: PM transition of the system being carried out.
1597  *
1598  * Execute the ->prepare() callback(s) for given device.  No new children of the
1599  * device may be registered after this function has returned.
1600  */
1601 static int device_prepare(struct device *dev, pm_message_t state)
1602 {
1603         int (*callback)(struct device *) = NULL;
1604         char *info = NULL;
1605         int ret = 0;
1606
1607         if (dev->power.syscore)
1608                 return 0;
1609
1610         /*
1611          * If a device's parent goes into runtime suspend at the wrong time,
1612          * it won't be possible to resume the device.  To prevent this we
1613          * block runtime suspend here, during the prepare phase, and allow
1614          * it again during the complete phase.
1615          */
1616         pm_runtime_get_noresume(dev);
1617
1618         device_lock(dev);
1619
1620         dev->power.wakeup_path = device_may_wakeup(dev);
1621
1622         if (dev->pm_domain) {
1623                 info = "preparing power domain ";
1624                 callback = dev->pm_domain->ops.prepare;
1625         } else if (dev->type && dev->type->pm) {
1626                 info = "preparing type ";
1627                 callback = dev->type->pm->prepare;
1628         } else if (dev->class && dev->class->pm) {
1629                 info = "preparing class ";
1630                 callback = dev->class->pm->prepare;
1631         } else if (dev->bus && dev->bus->pm) {
1632                 info = "preparing bus ";
1633                 callback = dev->bus->pm->prepare;
1634         }
1635
1636         if (!callback && dev->driver && dev->driver->pm) {
1637                 info = "preparing driver ";
1638                 callback = dev->driver->pm->prepare;
1639         }
1640
1641         if (callback)
1642                 ret = callback(dev);
1643
1644         device_unlock(dev);
1645
1646         if (ret < 0) {
1647                 suspend_report_result(callback, ret);
1648                 pm_runtime_put(dev);
1649                 return ret;
1650         }
1651         /*
1652          * A positive return value from ->prepare() means "this device appears
1653          * to be runtime-suspended and its state is fine, so if it really is
1654          * runtime-suspended, you can leave it in that state provided that you
1655          * will do the same thing with all of its descendants".  This only
1656          * applies to suspend transitions, however.
1657          */
1658         spin_lock_irq(&dev->power.lock);
1659         dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1660         spin_unlock_irq(&dev->power.lock);
1661         return 0;
1662 }
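/*
 * Illustrative sketch, not part of the original file: a driver can opt into
 * the direct_complete path set up above by returning a positive value from
 * its ->prepare() callback when the device's runtime-suspended state is also
 * adequate for system suspend.  The callback below is hypothetical.
 */
#if 0
static int example_prepare(struct device *dev)
{
        /* Positive return: leave the device runtime-suspended, if it is. */
        return pm_runtime_suspended(dev);
}
#endif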
1663
1664 /**
1665  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1666  * @state: PM transition of the system being carried out.
1667  *
1668  * Execute the ->prepare() callback(s) for all devices.
1669  */
1670 int dpm_prepare(pm_message_t state)
1671 {
1672         int error = 0;
1673
1674         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1675         might_sleep();
1676
1677         mutex_lock(&dpm_list_mtx);
1678         while (!list_empty(&dpm_list)) {
1679                 struct device *dev = to_device(dpm_list.next);
1680
1681                 get_device(dev);
1682                 mutex_unlock(&dpm_list_mtx);
1683
1684                 trace_device_pm_callback_start(dev, "", state.event);
1685                 error = device_prepare(dev, state);
1686                 trace_device_pm_callback_end(dev, error);
1687
1688                 mutex_lock(&dpm_list_mtx);
1689                 if (error) {
1690                         if (error == -EAGAIN) {
1691                                 put_device(dev);
1692                                 error = 0;
1693                                 continue;
1694                         }
1695                         printk(KERN_INFO "PM: Device %s not prepared "
1696                                 "for power transition: code %d\n",
1697                                 dev_name(dev), error);
1698                         put_device(dev);
1699                         break;
1700                 }
1701                 dev->power.is_prepared = true;
1702                 if (!list_empty(&dev->power.entry))
1703                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1704                 put_device(dev);
1705         }
1706         mutex_unlock(&dpm_list_mtx);
1707         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1708         return error;
1709 }
1710
1711 /**
1712  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1713  * @state: PM transition of the system being carried out.
1714  *
1715  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1716  * callbacks for them.
1717  */
1718 int dpm_suspend_start(pm_message_t state)
1719 {
1720         int error;
1721
1722         error = dpm_prepare(state);
1723         if (error) {
1724                 suspend_stats.failed_prepare++;
1725                 dpm_save_failed_step(SUSPEND_PREPARE);
1726         } else
1727                 error = dpm_suspend(state);
1728         return error;
1729 }
1730 EXPORT_SYMBOL_GPL(dpm_suspend_start);
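/*
 * Illustrative sketch, not part of the original file: the suspend core in
 * kernel/power/suspend.c pairs the four exported entry points roughly as
 * below (error handling and platform hooks omitted).
 */
#if 0
static int example_system_sleep(pm_message_t state)
{
        int error = dpm_suspend_start(state);          /* prepare + suspend */

        if (error)
                return error;

        error = dpm_suspend_end(state);                /* late + noirq */
        if (!error) {
                /* ... the platform enters the sleep state here ... */
                dpm_resume_start(resume_event(state)); /* noirq + early */
        }
        dpm_resume_end(resume_event(state));           /* resume + complete */
        return error;
}
#endif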
1731
1732 void __suspend_report_result(const char *function, void *fn, int ret)
1733 {
1734         if (ret)
1735                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1736 }
1737 EXPORT_SYMBOL_GPL(__suspend_report_result);
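/*
 * Illustrative sketch, not part of the original file: callers normally reach
 * __suspend_report_result() through the suspend_report_result() macro from
 * <linux/pm.h>, which passes __func__ automatically.  The suspend callback
 * and hardware helper below are hypothetical.
 */
#if 0
static int example_suspend(struct device *dev)
{
        int error = example_quiesce_hw(dev);   /* hypothetical helper */

        suspend_report_result(example_quiesce_hw, error);
        return error;
}
#endif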
1738
1739 /**
1740  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1741  * @dev: Device to wait for.
1742  * @subordinate: Device that needs to wait for @dev.
1743  */
1744 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1745 {
1746         dpm_wait(dev, subordinate->power.async_suspend);
1747         return async_error;
1748 }
1749 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
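/*
 * Illustrative sketch, not part of the original file: a driver whose resume
 * depends on another device that may be handled by a different (possibly
 * async) PM thread can order itself with device_pm_wait_for_dev().  The
 * consumer structure and its supplier pointer are hypothetical.
 */
#if 0
static int example_consumer_resume(struct device *dev)
{
        struct example_consumer *c = dev_get_drvdata(dev);

        /* Wait until the supplier's own resume callback has completed. */
        return device_pm_wait_for_dev(dev, c->supplier_dev);
}
#endif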
1750
1751 /**
1752  * dpm_for_each_dev - device iterator.
1753  * @data: data for the callback.
1754  * @fn: function to be called for each device.
1755  *
1756  * Iterate over devices in dpm_list, and call @fn for each device,
1757  * passing it @data.
1758  */
1759 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1760 {
1761         struct device *dev;
1762
1763         if (!fn)
1764                 return;
1765
1766         device_pm_lock();
1767         list_for_each_entry(dev, &dpm_list, power.entry)
1768                 fn(dev, data);
1769         device_pm_unlock();
1770 }
1771 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
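/*
 * Illustrative sketch, not part of the original file: dpm_for_each_dev()
 * invokes the callback with dpm_list_mtx held (via device_pm_lock()), so the
 * callback must not re-enter the PM core or block on that mutex.  The
 * counting helpers below are hypothetical.
 */
#if 0
static void example_count_one(struct device *dev, void *data)
{
        (*(unsigned int *)data)++;
}

static unsigned int example_count_pm_devices(void)
{
        unsigned int count = 0;

        dpm_for_each_dev(&count, example_count_one);
        return count;
}
#endif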