1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36 #include <linux/wakeup_reason.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in dpm_list are in depth-first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
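/*
 * Per-device suspend watchdog (see __device_suspend()): dpm_drv_timeout()
 * fires when a single device spends too long in its suspend callbacks, and
 * dpm_drv_wd_data carries the device and the suspending task so the handler
 * can dump that task's stack.
 */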
63 static void dpm_drv_timeout(unsigned long data);
64 struct dpm_drv_wd_data {
65         struct device *dev;
66         struct task_struct *tsk;
67 };
68
69 static int async_error;
70
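/* Map a PM event code to a human-readable verb for diagnostics. */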
71 static char *pm_verb(int event)
72 {
73         switch (event) {
74         case PM_EVENT_SUSPEND:
75                 return "suspend";
76         case PM_EVENT_RESUME:
77                 return "resume";
78         case PM_EVENT_FREEZE:
79                 return "freeze";
80         case PM_EVENT_QUIESCE:
81                 return "quiesce";
82         case PM_EVENT_HIBERNATE:
83                 return "hibernate";
84         case PM_EVENT_THAW:
85                 return "thaw";
86         case PM_EVENT_RESTORE:
87                 return "restore";
88         case PM_EVENT_RECOVER:
89                 return "recover";
90         default:
91                 return "(unknown PM event)";
92         }
93 }
94
95 /**
96  * device_pm_sleep_init - Initialize system suspend-related device fields.
97  * @dev: Device object being initialized.
98  */
99 void device_pm_sleep_init(struct device *dev)
100 {
101         dev->power.is_prepared = false;
102         dev->power.is_suspended = false;
103         dev->power.is_noirq_suspended = false;
104         dev->power.is_late_suspended = false;
105         init_completion(&dev->power.completion);
106         complete_all(&dev->power.completion);
107         dev->power.wakeup = NULL;
108         INIT_LIST_HEAD(&dev->power.entry);
109 }
110
111 /**
112  * device_pm_lock - Lock the list of active devices used by the PM core.
113  */
114 void device_pm_lock(void)
115 {
116         mutex_lock(&dpm_list_mtx);
117 }
118
119 /**
120  * device_pm_unlock - Unlock the list of active devices used by the PM core.
121  */
122 void device_pm_unlock(void)
123 {
124         mutex_unlock(&dpm_list_mtx);
125 }
126
127 /**
128  * device_pm_add - Add a device to the PM core's list of active devices.
129  * @dev: Device to add to the list.
130  */
131 void device_pm_add(struct device *dev)
132 {
133         pr_debug("PM: Adding info for %s:%s\n",
134                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
135         mutex_lock(&dpm_list_mtx);
136         if (dev->parent && dev->parent->power.is_prepared)
137                 dev_warn(dev, "parent %s should not be sleeping\n",
138                         dev_name(dev->parent));
139         list_add_tail(&dev->power.entry, &dpm_list);
140         mutex_unlock(&dpm_list_mtx);
141 }
142
143 /**
144  * device_pm_remove - Remove a device from the PM core's list of active devices.
145  * @dev: Device to be removed from the list.
146  */
147 void device_pm_remove(struct device *dev)
148 {
149         pr_debug("PM: Removing info for %s:%s\n",
150                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
151         complete_all(&dev->power.completion);
152         mutex_lock(&dpm_list_mtx);
153         list_del_init(&dev->power.entry);
154         mutex_unlock(&dpm_list_mtx);
155         device_wakeup_disable(dev);
156         pm_runtime_remove(dev);
157 }
158
159 /**
160  * device_pm_move_before - Move device in the PM core's list of active devices.
161  * @deva: Device to move in dpm_list.
162  * @devb: Device @deva should come before.
163  */
164 void device_pm_move_before(struct device *deva, struct device *devb)
165 {
166         pr_debug("PM: Moving %s:%s before %s:%s\n",
167                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
168                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
169         /* Delete deva from dpm_list and reinsert before devb. */
170         list_move_tail(&deva->power.entry, &devb->power.entry);
171 }
172
173 /**
174  * device_pm_move_after - Move device in the PM core's list of active devices.
175  * @deva: Device to move in dpm_list.
176  * @devb: Device @deva should come after.
177  */
178 void device_pm_move_after(struct device *deva, struct device *devb)
179 {
180         pr_debug("PM: Moving %s:%s after %s:%s\n",
181                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
182                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
183         /* Delete deva from dpm_list and reinsert after devb. */
184         list_move(&deva->power.entry, &devb->power.entry);
185 }
186
187 /**
188  * device_pm_move_last - Move device to end of the PM core's list of devices.
189  * @dev: Device to move in dpm_list.
190  */
191 void device_pm_move_last(struct device *dev)
192 {
193         pr_debug("PM: Moving %s:%s to end of list\n",
194                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
195         list_move_tail(&dev->power.entry, &dpm_list);
196 }
197
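/*
 * With pm_print_times enabled, initcall_debug_start() logs entry into a
 * device's PM callback and returns a start timestamp; initcall_debug_report()
 * logs the return value and the elapsed time (nanoseconds >> 10, i.e.
 * approximately microseconds).
 */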
198 static ktime_t initcall_debug_start(struct device *dev)
199 {
200         ktime_t calltime = ktime_set(0, 0);
201
202         if (pm_print_times_enabled) {
203                 pr_info("calling  %s+ @ %i, parent: %s\n",
204                         dev_name(dev), task_pid_nr(current),
205                         dev->parent ? dev_name(dev->parent) : "none");
206                 calltime = ktime_get();
207         }
208
209         return calltime;
210 }
211
212 static void initcall_debug_report(struct device *dev, ktime_t calltime,
213                                   int error, pm_message_t state, char *info)
214 {
215         ktime_t rettime;
216         s64 nsecs;
217
218         rettime = ktime_get();
219         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
220
221         if (pm_print_times_enabled) {
222                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
223                         error, (unsigned long long)nsecs >> 10);
224         }
225 }
226
227 /**
228  * dpm_wait - Wait for a PM operation to complete.
229  * @dev: Device to wait for.
230  * @async: If unset, wait only if the device's power.async_suspend flag is set.
231  */
232 static void dpm_wait(struct device *dev, bool async)
233 {
234         if (!dev)
235                 return;
236
237         if (async || (pm_async_enabled && dev->power.async_suspend))
238                 wait_for_completion(&dev->power.completion);
239 }
240
241 static int dpm_wait_fn(struct device *dev, void *async_ptr)
242 {
243         dpm_wait(dev, *((bool *)async_ptr));
244         return 0;
245 }
246
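/*
 * Wait for all children of a device to finish their PM transitions; used on
 * the suspend path so that a parent is not suspended before its children.
 */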
247 static void dpm_wait_for_children(struct device *dev, bool async)
248 {
249        device_for_each_child(dev, &async, dpm_wait_fn);
250 }
251
252 /**
253  * pm_op - Return the PM operation appropriate for given PM event.
254  * @ops: PM operations to choose from.
255  * @state: PM transition of the system being carried out.
256  */
257 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
258 {
259         switch (state.event) {
260 #ifdef CONFIG_SUSPEND
261         case PM_EVENT_SUSPEND:
262                 return ops->suspend;
263         case PM_EVENT_RESUME:
264                 return ops->resume;
265 #endif /* CONFIG_SUSPEND */
266 #ifdef CONFIG_HIBERNATE_CALLBACKS
267         case PM_EVENT_FREEZE:
268         case PM_EVENT_QUIESCE:
269                 return ops->freeze;
270         case PM_EVENT_HIBERNATE:
271                 return ops->poweroff;
272         case PM_EVENT_THAW:
273         case PM_EVENT_RECOVER:
274                 return ops->thaw;
276         case PM_EVENT_RESTORE:
277                 return ops->restore;
278 #endif /* CONFIG_HIBERNATE_CALLBACKS */
279         }
280
281         return NULL;
282 }
283
284 /**
285  * pm_late_early_op - Return the PM operation appropriate for given PM event.
286  * @ops: PM operations to choose from.
287  * @state: PM transition of the system being carried out.
288  *
289  * Runtime PM is disabled for the device while the returned callback runs.
290  */
291 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
292                                       pm_message_t state)
293 {
294         switch (state.event) {
295 #ifdef CONFIG_SUSPEND
296         case PM_EVENT_SUSPEND:
297                 return ops->suspend_late;
298         case PM_EVENT_RESUME:
299                 return ops->resume_early;
300 #endif /* CONFIG_SUSPEND */
301 #ifdef CONFIG_HIBERNATE_CALLBACKS
302         case PM_EVENT_FREEZE:
303         case PM_EVENT_QUIESCE:
304                 return ops->freeze_late;
305         case PM_EVENT_HIBERNATE:
306                 return ops->poweroff_late;
307         case PM_EVENT_THAW:
308         case PM_EVENT_RECOVER:
309                 return ops->thaw_early;
310         case PM_EVENT_RESTORE:
311                 return ops->restore_early;
312 #endif /* CONFIG_HIBERNATE_CALLBACKS */
313         }
314
315         return NULL;
316 }
317
318 /**
319  * pm_noirq_op - Return the PM operation appropriate for given PM event.
320  * @ops: PM operations to choose from.
321  * @state: PM transition of the system being carried out.
322  *
323  * The device's driver will not receive interrupts while the callback returned
324  * by this function is being executed.
325  */
326 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
327 {
328         switch (state.event) {
329 #ifdef CONFIG_SUSPEND
330         case PM_EVENT_SUSPEND:
331                 return ops->suspend_noirq;
332         case PM_EVENT_RESUME:
333                 return ops->resume_noirq;
334 #endif /* CONFIG_SUSPEND */
335 #ifdef CONFIG_HIBERNATE_CALLBACKS
336         case PM_EVENT_FREEZE:
337         case PM_EVENT_QUIESCE:
338                 return ops->freeze_noirq;
339         case PM_EVENT_HIBERNATE:
340                 return ops->poweroff_noirq;
341         case PM_EVENT_THAW:
342         case PM_EVENT_RECOVER:
343                 return ops->thaw_noirq;
344         case PM_EVENT_RESTORE:
345                 return ops->restore_noirq;
346 #endif /* CONFIG_HIBERNATE_CALLBACKS */
347         }
348
349         return NULL;
350 }
351
352 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
353 {
354         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
355                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
356                 ", may wakeup" : "");
357 }
358
359 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
360                         int error)
361 {
362         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
363                 dev_name(dev), pm_verb(state.event), info, error);
364 }
365
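/* Log how long the given phase of the transition took, in milliseconds. */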
366 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
367 {
368         ktime_t calltime;
369         u64 usecs64;
370         int usecs;
371
372         calltime = ktime_get();
373         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
374         do_div(usecs64, NSEC_PER_USEC);
375         usecs = usecs64;
376         if (usecs == 0)
377                 usecs = 1;
378         pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
379                 info ?: "", info ? " " : "", pm_verb(state.event),
380                 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
381 }
382
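/*
 * Invoke a single PM callback for a device, with tracing and optional
 * timing around it, and report any error via suspend_report_result().
 */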
383 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
384                             pm_message_t state, char *info)
385 {
386         ktime_t calltime;
387         int error;
388
389         if (!cb)
390                 return 0;
391
392         calltime = initcall_debug_start(dev);
393
394         pm_dev_dbg(dev, state, info);
395         trace_device_pm_callback_start(dev, info, state.event);
396         error = cb(dev);
397         trace_device_pm_callback_end(dev, error);
398         suspend_report_result(cb, error);
399
400         initcall_debug_report(dev, calltime, error, state, info);
401
402         return error;
403 }
404
405 #ifdef CONFIG_DPM_WATCHDOG
406 struct dpm_watchdog {
407         struct device           *dev;
408         struct task_struct      *tsk;
409         struct timer_list       timer;
410 };
411
412 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
413         struct dpm_watchdog wd
414
415 /**
416  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
417  * @data: Watchdog object address.
418  *
419  * Called when a driver has timed out suspending or resuming.
420  * There's not much we can do here to recover so panic() to
421  * capture a crash-dump in pstore.
422  */
423 static void dpm_watchdog_handler(unsigned long data)
424 {
425         struct dpm_watchdog *wd = (void *)data;
426
427         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
428         show_stack(wd->tsk, NULL);
429         panic("%s %s: unrecoverable failure\n",
430                 dev_driver_string(wd->dev), dev_name(wd->dev));
431 }
432
433 /**
434  * dpm_watchdog_set - Enable pm watchdog for given device.
435  * @wd: Watchdog. Must be allocated on the stack.
436  * @dev: Device to handle.
437  */
438 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
439 {
440         struct timer_list *timer = &wd->timer;
441
442         wd->dev = dev;
443         wd->tsk = current;
444
445         init_timer_on_stack(timer);
446         /* use same timeout value for both suspend and resume */
447         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
448         timer->function = dpm_watchdog_handler;
449         timer->data = (unsigned long)wd;
450         add_timer(timer);
451 }
452
453 /**
454  * dpm_watchdog_clear - Disable suspend/resume watchdog.
455  * @wd: Watchdog to disable.
456  */
457 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
458 {
459         struct timer_list *timer = &wd->timer;
460
461         del_timer_sync(timer);
462         destroy_timer_on_stack(timer);
463 }
464 #else
465 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
466 #define dpm_watchdog_set(x, y)
467 #define dpm_watchdog_clear(x)
468 #endif
469
470 /*------------------------- Resume routines -------------------------*/
471
472 /**
473  * device_resume_noirq - Execute a "noirq resume" callback for given device.
474  * @dev: Device to handle.
475  * @state: PM transition of the system being carried out.
476  * @async: If true, the device is being resumed asynchronously.
477  *
478  * The driver of @dev will not receive interrupts while this function is being
479  * executed.
480  */
481 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
482 {
483         pm_callback_t callback = NULL;
484         char *info = NULL;
485         int error = 0;
486
487         TRACE_DEVICE(dev);
488         TRACE_RESUME(0);
489
490         if (dev->power.syscore || dev->power.direct_complete)
491                 goto Out;
492
493         if (!dev->power.is_noirq_suspended)
494                 goto Out;
495
496         dpm_wait(dev->parent, async);
497
498         if (dev->pm_domain) {
499                 info = "noirq power domain ";
500                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
501         } else if (dev->type && dev->type->pm) {
502                 info = "noirq type ";
503                 callback = pm_noirq_op(dev->type->pm, state);
504         } else if (dev->class && dev->class->pm) {
505                 info = "noirq class ";
506                 callback = pm_noirq_op(dev->class->pm, state);
507         } else if (dev->bus && dev->bus->pm) {
508                 info = "noirq bus ";
509                 callback = pm_noirq_op(dev->bus->pm, state);
510         }
511
512         if (!callback && dev->driver && dev->driver->pm) {
513                 info = "noirq driver ";
514                 callback = pm_noirq_op(dev->driver->pm, state);
515         }
516
517         error = dpm_run_callback(callback, dev, state, info);
518         dev->power.is_noirq_suspended = false;
519
520  Out:
521         complete_all(&dev->power.completion);
522         TRACE_RESUME(error);
523         return error;
524 }
525
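/*
 * A device is suspended/resumed asynchronously only if it has async_suspend
 * set, the global pm_async switch is enabled, and PM tracing is not active.
 */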
526 static bool is_async(struct device *dev)
527 {
528         return dev->power.async_suspend && pm_async_enabled
529                 && !pm_trace_is_enabled();
530 }
531
532 static void async_resume_noirq(void *data, async_cookie_t cookie)
533 {
534         struct device *dev = (struct device *)data;
535         int error;
536
537         error = device_resume_noirq(dev, pm_transition, true);
538         if (error)
539                 pm_dev_err(dev, pm_transition, " async", error);
540
541         put_device(dev);
542 }
543
544 /**
545  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
546  * @state: PM transition of the system being carried out.
547  *
548  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
549  * enable device drivers to receive interrupts.
550  */
551 void dpm_resume_noirq(pm_message_t state)
552 {
553         struct device *dev;
554         ktime_t starttime = ktime_get();
555
556         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
557         mutex_lock(&dpm_list_mtx);
558         pm_transition = state;
559
560         /*
561          * Advance the async threads upfront,
562          * in case the starting of async threads is
563          * delayed by non-async resuming devices.
564          */
565         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
566                 reinit_completion(&dev->power.completion);
567                 if (is_async(dev)) {
568                         get_device(dev);
569                         async_schedule(async_resume_noirq, dev);
570                 }
571         }
572
573         while (!list_empty(&dpm_noirq_list)) {
574                 dev = to_device(dpm_noirq_list.next);
575                 get_device(dev);
576                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
577                 mutex_unlock(&dpm_list_mtx);
578
579                 if (!is_async(dev)) {
580                         int error;
581
582                         error = device_resume_noirq(dev, state, false);
583                         if (error) {
584                                 suspend_stats.failed_resume_noirq++;
585                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
586                                 dpm_save_failed_dev(dev_name(dev));
587                                 pm_dev_err(dev, state, " noirq", error);
588                         }
589                 }
590
591                 mutex_lock(&dpm_list_mtx);
592                 put_device(dev);
593         }
594         mutex_unlock(&dpm_list_mtx);
595         async_synchronize_full();
596         dpm_show_time(starttime, state, "noirq");
597         resume_device_irqs();
598         device_wakeup_disarm_wake_irqs();
599         cpuidle_resume();
600         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
601 }
602
603 /**
604  * device_resume_early - Execute an "early resume" callback for given device.
605  * @dev: Device to handle.
606  * @state: PM transition of the system being carried out.
607  * @async: If true, the device is being resumed asynchronously.
608  *
609  * Runtime PM is disabled for @dev while this function is being executed.
610  */
611 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
612 {
613         pm_callback_t callback = NULL;
614         char *info = NULL;
615         int error = 0;
616
617         TRACE_DEVICE(dev);
618         TRACE_RESUME(0);
619
620         if (dev->power.syscore || dev->power.direct_complete)
621                 goto Out;
622
623         if (!dev->power.is_late_suspended)
624                 goto Out;
625
626         dpm_wait(dev->parent, async);
627
628         if (dev->pm_domain) {
629                 info = "early power domain ";
630                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
631         } else if (dev->type && dev->type->pm) {
632                 info = "early type ";
633                 callback = pm_late_early_op(dev->type->pm, state);
634         } else if (dev->class && dev->class->pm) {
635                 info = "early class ";
636                 callback = pm_late_early_op(dev->class->pm, state);
637         } else if (dev->bus && dev->bus->pm) {
638                 info = "early bus ";
639                 callback = pm_late_early_op(dev->bus->pm, state);
640         }
641
642         if (!callback && dev->driver && dev->driver->pm) {
643                 info = "early driver ";
644                 callback = pm_late_early_op(dev->driver->pm, state);
645         }
646
647         error = dpm_run_callback(callback, dev, state, info);
648         dev->power.is_late_suspended = false;
649
650  Out:
651         TRACE_RESUME(error);
652
653         pm_runtime_enable(dev);
654         complete_all(&dev->power.completion);
655         return error;
656 }
657
658 static void async_resume_early(void *data, async_cookie_t cookie)
659 {
660         struct device *dev = (struct device *)data;
661         int error;
662
663         error = device_resume_early(dev, pm_transition, true);
664         if (error)
665                 pm_dev_err(dev, pm_transition, " async", error);
666
667         put_device(dev);
668 }
669
670 /**
671  * dpm_resume_early - Execute "early resume" callbacks for all devices.
672  * @state: PM transition of the system being carried out.
673  */
674 void dpm_resume_early(pm_message_t state)
675 {
676         struct device *dev;
677         ktime_t starttime = ktime_get();
678
679         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
680         mutex_lock(&dpm_list_mtx);
681         pm_transition = state;
682
683         /*
684          * Advance the async threads upfront,
685          * in case the starting of async threads is
686          * delayed by non-async resuming devices.
687          */
688         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
689                 reinit_completion(&dev->power.completion);
690                 if (is_async(dev)) {
691                         get_device(dev);
692                         async_schedule(async_resume_early, dev);
693                 }
694         }
695
696         while (!list_empty(&dpm_late_early_list)) {
697                 dev = to_device(dpm_late_early_list.next);
698                 get_device(dev);
699                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
700                 mutex_unlock(&dpm_list_mtx);
701
702                 if (!is_async(dev)) {
703                         int error;
704
705                         error = device_resume_early(dev, state, false);
706                         if (error) {
707                                 suspend_stats.failed_resume_early++;
708                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
709                                 dpm_save_failed_dev(dev_name(dev));
710                                 pm_dev_err(dev, state, " early", error);
711                         }
712                 }
713                 mutex_lock(&dpm_list_mtx);
714                 put_device(dev);
715         }
716         mutex_unlock(&dpm_list_mtx);
717         async_synchronize_full();
718         dpm_show_time(starttime, state, "early");
719         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
720 }
721
722 /**
723  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
724  * @state: PM transition of the system being carried out.
725  */
726 void dpm_resume_start(pm_message_t state)
727 {
728         dpm_resume_noirq(state);
729         dpm_resume_early(state);
730 }
731 EXPORT_SYMBOL_GPL(dpm_resume_start);
732
733 /**
734  * device_resume - Execute "resume" callbacks for given device.
735  * @dev: Device to handle.
736  * @state: PM transition of the system being carried out.
737  * @async: If true, the device is being resumed asynchronously.
738  */
739 static int device_resume(struct device *dev, pm_message_t state, bool async)
740 {
741         pm_callback_t callback = NULL;
742         char *info = NULL;
743         int error = 0;
744         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
745
746         TRACE_DEVICE(dev);
747         TRACE_RESUME(0);
748
749         if (dev->power.syscore)
750                 goto Complete;
751
752         if (dev->power.direct_complete) {
753                 /* Match the pm_runtime_disable() in __device_suspend(). */
754                 pm_runtime_enable(dev);
755                 goto Complete;
756         }
757
758         dpm_wait(dev->parent, async);
759         dpm_watchdog_set(&wd, dev);
760         device_lock(dev);
761
762         /*
763          * This is a fib.  But we'll allow new children to be added below
764          * a resumed device, even if the device hasn't been completed yet.
765          */
766         dev->power.is_prepared = false;
767
768         if (!dev->power.is_suspended)
769                 goto Unlock;
770
771         if (dev->pm_domain) {
772                 info = "power domain ";
773                 callback = pm_op(&dev->pm_domain->ops, state);
774                 goto Driver;
775         }
776
777         if (dev->type && dev->type->pm) {
778                 info = "type ";
779                 callback = pm_op(dev->type->pm, state);
780                 goto Driver;
781         }
782
783         if (dev->class) {
784                 if (dev->class->pm) {
785                         info = "class ";
786                         callback = pm_op(dev->class->pm, state);
787                         goto Driver;
788                 } else if (dev->class->resume) {
789                         info = "legacy class ";
790                         callback = dev->class->resume;
791                         goto End;
792                 }
793         }
794
795         if (dev->bus) {
796                 if (dev->bus->pm) {
797                         info = "bus ";
798                         callback = pm_op(dev->bus->pm, state);
799                 } else if (dev->bus->resume) {
800                         info = "legacy bus ";
801                         callback = dev->bus->resume;
802                         goto End;
803                 }
804         }
805
806  Driver:
807         if (!callback && dev->driver && dev->driver->pm) {
808                 info = "driver ";
809                 callback = pm_op(dev->driver->pm, state);
810         }
811
812  End:
813         error = dpm_run_callback(callback, dev, state, info);
814         dev->power.is_suspended = false;
815
816  Unlock:
817         device_unlock(dev);
818         dpm_watchdog_clear(&wd);
819
820  Complete:
821         complete_all(&dev->power.completion);
822
823         TRACE_RESUME(error);
824
825         return error;
826 }
827
828 static void async_resume(void *data, async_cookie_t cookie)
829 {
830         struct device *dev = (struct device *)data;
831         int error;
832
833         error = device_resume(dev, pm_transition, true);
834         if (error)
835                 pm_dev_err(dev, pm_transition, " async", error);
836         put_device(dev);
837 }
838
839 /**
840  * dpm_drv_timeout - Driver suspend / resume watchdog handler.
841  * @data: Address of the dpm_drv_wd_data for the device that timed out.
842  *
843  * Called when a driver has timed out suspending or resuming.  There's not
844  * much we can do here to recover, so BUG() out for a crash-dump.
845  */
848 static void dpm_drv_timeout(unsigned long data)
849 {
850         struct dpm_drv_wd_data *wd_data = (void *)data;
851         struct device *dev = wd_data->dev;
852         struct task_struct *tsk = wd_data->tsk;
853
854         printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
855                (dev->driver ? dev->driver->name : "no driver"));
856
857         printk(KERN_EMERG "dpm suspend stack:\n");
858         show_stack(tsk, NULL);
859
860         BUG();
861 }
862
863 /**
864  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
865  * @state: PM transition of the system being carried out.
866  *
867  * Execute the appropriate "resume" callback for all devices whose status
868  * indicates that they are suspended.
869  */
870 void dpm_resume(pm_message_t state)
871 {
872         struct device *dev;
873         ktime_t starttime = ktime_get();
874
875         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
876         might_sleep();
877
878         mutex_lock(&dpm_list_mtx);
879         pm_transition = state;
880         async_error = 0;
881
882         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
883                 reinit_completion(&dev->power.completion);
884                 if (is_async(dev)) {
885                         get_device(dev);
886                         async_schedule(async_resume, dev);
887                 }
888         }
889
890         while (!list_empty(&dpm_suspended_list)) {
891                 dev = to_device(dpm_suspended_list.next);
892                 get_device(dev);
893                 if (!is_async(dev)) {
894                         int error;
895
896                         mutex_unlock(&dpm_list_mtx);
897
898                         error = device_resume(dev, state, false);
899                         if (error) {
900                                 suspend_stats.failed_resume++;
901                                 dpm_save_failed_step(SUSPEND_RESUME);
902                                 dpm_save_failed_dev(dev_name(dev));
903                                 pm_dev_err(dev, state, "", error);
904                         }
905
906                         mutex_lock(&dpm_list_mtx);
907                 }
908                 if (!list_empty(&dev->power.entry))
909                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
910                 put_device(dev);
911         }
912         mutex_unlock(&dpm_list_mtx);
913         async_synchronize_full();
914         dpm_show_time(starttime, state, NULL);
915
916         cpufreq_resume();
917         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
918 }
919
920 /**
921  * device_complete - Complete a PM transition for given device.
922  * @dev: Device to handle.
923  * @state: PM transition of the system being carried out.
924  */
925 static void device_complete(struct device *dev, pm_message_t state)
926 {
927         void (*callback)(struct device *) = NULL;
928         char *info = NULL;
929
930         if (dev->power.syscore)
931                 return;
932
933         device_lock(dev);
934
935         if (dev->pm_domain) {
936                 info = "completing power domain ";
937                 callback = dev->pm_domain->ops.complete;
938         } else if (dev->type && dev->type->pm) {
939                 info = "completing type ";
940                 callback = dev->type->pm->complete;
941         } else if (dev->class && dev->class->pm) {
942                 info = "completing class ";
943                 callback = dev->class->pm->complete;
944         } else if (dev->bus && dev->bus->pm) {
945                 info = "completing bus ";
946                 callback = dev->bus->pm->complete;
947         }
948
949         if (!callback && dev->driver && dev->driver->pm) {
950                 info = "completing driver ";
951                 callback = dev->driver->pm->complete;
952         }
953
954         if (callback) {
955                 pm_dev_dbg(dev, state, info);
956                 callback(dev);
957         }
958
959         device_unlock(dev);
960
961         pm_runtime_put(dev);
962 }
963
964 /**
965  * dpm_complete - Complete a PM transition for all non-sysdev devices.
966  * @state: PM transition of the system being carried out.
967  *
968  * Execute the ->complete() callbacks for all devices whose PM status is not
969  * DPM_ON (this allows new devices to be registered).
970  */
971 void dpm_complete(pm_message_t state)
972 {
973         struct list_head list;
974
975         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
976         might_sleep();
977
978         INIT_LIST_HEAD(&list);
979         mutex_lock(&dpm_list_mtx);
980         while (!list_empty(&dpm_prepared_list)) {
981                 struct device *dev = to_device(dpm_prepared_list.prev);
982
983                 get_device(dev);
984                 dev->power.is_prepared = false;
985                 list_move(&dev->power.entry, &list);
986                 mutex_unlock(&dpm_list_mtx);
987
988                 trace_device_pm_callback_start(dev, "", state.event);
989                 device_complete(dev, state);
990                 trace_device_pm_callback_end(dev, 0);
991
992                 mutex_lock(&dpm_list_mtx);
993                 put_device(dev);
994         }
995         list_splice(&list, &dpm_list);
996         mutex_unlock(&dpm_list_mtx);
997         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
998 }
999
1000 /**
1001  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1002  * @state: PM transition of the system being carried out.
1003  *
1004  * Execute "resume" callbacks for all devices and complete the PM transition of
1005  * the system.
1006  */
1007 void dpm_resume_end(pm_message_t state)
1008 {
1009         dpm_resume(state);
1010         dpm_complete(state);
1011 }
1012 EXPORT_SYMBOL_GPL(dpm_resume_end);
1013
1014
1015 /*------------------------- Suspend routines -------------------------*/
1016
1017 /**
1018  * resume_event - Return a "resume" message for given "suspend" sleep state.
1019  * @sleep_state: PM message representing a sleep state.
1020  *
1021  * Return a PM message representing the resume event corresponding to given
1022  * sleep state.
1023  */
1024 static pm_message_t resume_event(pm_message_t sleep_state)
1025 {
1026         switch (sleep_state.event) {
1027         case PM_EVENT_SUSPEND:
1028                 return PMSG_RESUME;
1029         case PM_EVENT_FREEZE:
1030         case PM_EVENT_QUIESCE:
1031                 return PMSG_RECOVER;
1032         case PM_EVENT_HIBERNATE:
1033                 return PMSG_RESTORE;
1034         }
1035         return PMSG_ON;
1036 }
1037
1038 /**
1039  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1040  * @dev: Device to handle.
1041  * @state: PM transition of the system being carried out.
1042  * @async: If true, the device is being suspended asynchronously.
1043  *
1044  * The driver of @dev will not receive interrupts while this function is being
1045  * executed.
1046  */
1047 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1048 {
1049         pm_callback_t callback = NULL;
1050         char *info = NULL;
1051         int error = 0;
1052
1053         TRACE_DEVICE(dev);
1054         TRACE_SUSPEND(0);
1055
1056         if (async_error)
1057                 goto Complete;
1058
1059         if (pm_wakeup_pending()) {
1060                 async_error = -EBUSY;
1061                 goto Complete;
1062         }
1063
1064         if (dev->power.syscore || dev->power.direct_complete)
1065                 goto Complete;
1066
1067         dpm_wait_for_children(dev, async);
1068
1069         if (dev->pm_domain) {
1070                 info = "noirq power domain ";
1071                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1072         } else if (dev->type && dev->type->pm) {
1073                 info = "noirq type ";
1074                 callback = pm_noirq_op(dev->type->pm, state);
1075         } else if (dev->class && dev->class->pm) {
1076                 info = "noirq class ";
1077                 callback = pm_noirq_op(dev->class->pm, state);
1078         } else if (dev->bus && dev->bus->pm) {
1079                 info = "noirq bus ";
1080                 callback = pm_noirq_op(dev->bus->pm, state);
1081         }
1082
1083         if (!callback && dev->driver && dev->driver->pm) {
1084                 info = "noirq driver ";
1085                 callback = pm_noirq_op(dev->driver->pm, state);
1086         }
1087
1088         error = dpm_run_callback(callback, dev, state, info);
1089         if (!error)
1090                 dev->power.is_noirq_suspended = true;
1091         else
1092                 async_error = error;
1093
1094 Complete:
1095         complete_all(&dev->power.completion);
1096         TRACE_SUSPEND(error);
1097         return error;
1098 }
1099
1100 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1101 {
1102         struct device *dev = (struct device *)data;
1103         int error;
1104
1105         error = __device_suspend_noirq(dev, pm_transition, true);
1106         if (error) {
1107                 dpm_save_failed_dev(dev_name(dev));
1108                 pm_dev_err(dev, pm_transition, " async", error);
1109         }
1110
1111         put_device(dev);
1112 }
1113
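/*
 * Run the "noirq" suspend of one device, either on an async thread (when
 * is_async() allows it) or synchronously in the caller; device_suspend_late()
 * and device_suspend() below follow the same pattern.
 */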
1114 static int device_suspend_noirq(struct device *dev)
1115 {
1116         reinit_completion(&dev->power.completion);
1117
1118         if (is_async(dev)) {
1119                 get_device(dev);
1120                 async_schedule(async_suspend_noirq, dev);
1121                 return 0;
1122         }
1123         return __device_suspend_noirq(dev, pm_transition, false);
1124 }
1125
1126 /**
1127  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1128  * @state: PM transition of the system being carried out.
1129  *
1130  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1131  * handlers for all non-sysdev devices.
1132  */
1133 int dpm_suspend_noirq(pm_message_t state)
1134 {
1135         ktime_t starttime = ktime_get();
1136         int error = 0;
1137
1138         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1139         cpuidle_pause();
1140         device_wakeup_arm_wake_irqs();
1141         suspend_device_irqs();
1142         mutex_lock(&dpm_list_mtx);
1143         pm_transition = state;
1144         async_error = 0;
1145
1146         while (!list_empty(&dpm_late_early_list)) {
1147                 struct device *dev = to_device(dpm_late_early_list.prev);
1148
1149                 get_device(dev);
1150                 mutex_unlock(&dpm_list_mtx);
1151
1152                 error = device_suspend_noirq(dev);
1153
1154                 mutex_lock(&dpm_list_mtx);
1155                 if (error) {
1156                         pm_dev_err(dev, state, " noirq", error);
1157                         dpm_save_failed_dev(dev_name(dev));
1158                         put_device(dev);
1159                         break;
1160                 }
1161                 if (!list_empty(&dev->power.entry))
1162                         list_move(&dev->power.entry, &dpm_noirq_list);
1163                 put_device(dev);
1164
1165                 if (async_error)
1166                         break;
1167         }
1168         mutex_unlock(&dpm_list_mtx);
1169         async_synchronize_full();
1170         if (!error)
1171                 error = async_error;
1172
1173         if (error) {
1174                 suspend_stats.failed_suspend_noirq++;
1175                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1176                 dpm_resume_noirq(resume_event(state));
1177         } else {
1178                 dpm_show_time(starttime, state, "noirq");
1179         }
1180         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1181         return error;
1182 }
1183
1184 /**
1185  * __device_suspend_late - Execute a "late suspend" callback for given device.
1186  * @dev: Device to handle.
1187  * @state: PM transition of the system being carried out.
1188  * @async: If true, the device is being suspended asynchronously.
1189  *
1190  * Runtime PM is disabled for @dev while this function is being executed.
1191  */
1192 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1193 {
1194         pm_callback_t callback = NULL;
1195         char *info = NULL;
1196         int error = 0;
1197
1198         TRACE_DEVICE(dev);
1199         TRACE_SUSPEND(0);
1200
1201         __pm_runtime_disable(dev, false);
1202
1203         if (async_error)
1204                 goto Complete;
1205
1206         if (pm_wakeup_pending()) {
1207                 async_error = -EBUSY;
1208                 goto Complete;
1209         }
1210
1211         if (dev->power.syscore || dev->power.direct_complete)
1212                 goto Complete;
1213
1214         dpm_wait_for_children(dev, async);
1215
1216         if (dev->pm_domain) {
1217                 info = "late power domain ";
1218                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1219         } else if (dev->type && dev->type->pm) {
1220                 info = "late type ";
1221                 callback = pm_late_early_op(dev->type->pm, state);
1222         } else if (dev->class && dev->class->pm) {
1223                 info = "late class ";
1224                 callback = pm_late_early_op(dev->class->pm, state);
1225         } else if (dev->bus && dev->bus->pm) {
1226                 info = "late bus ";
1227                 callback = pm_late_early_op(dev->bus->pm, state);
1228         }
1229
1230         if (!callback && dev->driver && dev->driver->pm) {
1231                 info = "late driver ";
1232                 callback = pm_late_early_op(dev->driver->pm, state);
1233         }
1234
1235         error = dpm_run_callback(callback, dev, state, info);
1236         if (!error)
1237                 dev->power.is_late_suspended = true;
1238         else
1239                 async_error = error;
1240
1241 Complete:
1242         TRACE_SUSPEND(error);
1243         complete_all(&dev->power.completion);
1244         return error;
1245 }
1246
1247 static void async_suspend_late(void *data, async_cookie_t cookie)
1248 {
1249         struct device *dev = (struct device *)data;
1250         int error;
1251
1252         error = __device_suspend_late(dev, pm_transition, true);
1253         if (error) {
1254                 dpm_save_failed_dev(dev_name(dev));
1255                 pm_dev_err(dev, pm_transition, " async", error);
1256         }
1257         put_device(dev);
1258 }
1259
1260 static int device_suspend_late(struct device *dev)
1261 {
1262         reinit_completion(&dev->power.completion);
1263
1264         if (is_async(dev)) {
1265                 get_device(dev);
1266                 async_schedule(async_suspend_late, dev);
1267                 return 0;
1268         }
1269
1270         return __device_suspend_late(dev, pm_transition, false);
1271 }
1272
1273 /**
1274  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1275  * @state: PM transition of the system being carried out.
1276  */
1277 int dpm_suspend_late(pm_message_t state)
1278 {
1279         ktime_t starttime = ktime_get();
1280         int error = 0;
1281
1282         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1283         mutex_lock(&dpm_list_mtx);
1284         pm_transition = state;
1285         async_error = 0;
1286
1287         while (!list_empty(&dpm_suspended_list)) {
1288                 struct device *dev = to_device(dpm_suspended_list.prev);
1289
1290                 get_device(dev);
1291                 mutex_unlock(&dpm_list_mtx);
1292
1293                 error = device_suspend_late(dev);
1294
1295                 mutex_lock(&dpm_list_mtx);
1296                 if (error) {
1297                         pm_dev_err(dev, state, " late", error);
1298                         dpm_save_failed_dev(dev_name(dev));
1299                         put_device(dev);
1300                         break;
1301                 }
1302                 if (!list_empty(&dev->power.entry))
1303                         list_move(&dev->power.entry, &dpm_late_early_list);
1304                 put_device(dev);
1305
1306                 if (async_error)
1307                         break;
1308         }
1309         mutex_unlock(&dpm_list_mtx);
1310         async_synchronize_full();
1311         if (!error)
1312                 error = async_error;
1313         if (error) {
1314                 suspend_stats.failed_suspend_late++;
1315                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1316                 dpm_resume_early(resume_event(state));
1317         } else {
1318                 dpm_show_time(starttime, state, "late");
1319         }
1320         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1321         return error;
1322 }
1323
1324 /**
1325  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1326  * @state: PM transition of the system being carried out.
1327  */
1328 int dpm_suspend_end(pm_message_t state)
1329 {
1330         int error = dpm_suspend_late(state);
1331         if (error)
1332                 return error;
1333
1334         error = dpm_suspend_noirq(state);
1335         if (error) {
1336                 dpm_resume_early(resume_event(state));
1337                 return error;
1338         }
1339
1340         return 0;
1341 }
1342 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1343
1344 /**
1345  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1346  * @dev: Device to suspend.
1347  * @state: PM transition of the system being carried out.
1348  * @cb: Suspend callback to execute.
1349  * @info: string description of caller.
1350  */
1351 static int legacy_suspend(struct device *dev, pm_message_t state,
1352                           int (*cb)(struct device *dev, pm_message_t state),
1353                           char *info)
1354 {
1355         int error;
1356         ktime_t calltime;
1357
1358         calltime = initcall_debug_start(dev);
1359
1360         trace_device_pm_callback_start(dev, info, state.event);
1361         error = cb(dev, state);
1362         trace_device_pm_callback_end(dev, error);
1363         suspend_report_result(cb, error);
1364
1365         initcall_debug_report(dev, calltime, error, state, info);
1366
1367         return error;
1368 }
1369
1370 /**
1371  * __device_suspend - Execute "suspend" callbacks for given device.
1372  * @dev: Device to handle.
1373  * @state: PM transition of the system being carried out.
1374  * @async: If true, the device is being suspended asynchronously.
1375  */
1376 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1377 {
1378         pm_callback_t callback = NULL;
1379         char *info = NULL;
1380         int error = 0;
1381         struct timer_list timer;
1382         struct dpm_drv_wd_data data;
1383         char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1384         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1385
1386         TRACE_DEVICE(dev);
1387         TRACE_SUSPEND(0);
1388
1389         dpm_wait_for_children(dev, async);
1390
1391         if (async_error)
1392                 goto Complete;
1393
1394         /*
1395          * If a device configured to wake up the system from sleep states
1396          * has been suspended at run time and there's a resume request pending
1397          * for it, this is equivalent to the device signaling wakeup, so the
1398          * system suspend operation should be aborted.
1399          */
1400         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1401                 pm_wakeup_event(dev, 0);
1402
1403         if (pm_wakeup_pending()) {
1404                 pm_get_active_wakeup_sources(suspend_abort,
1405                         MAX_SUSPEND_ABORT_LEN);
1406                 log_suspend_abort_reason(suspend_abort);
1407                 async_error = -EBUSY;
1408                 goto Complete;
1409         }
1410
1411         if (dev->power.syscore)
1412                 goto Complete;
1413
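        /*
         * Arm the per-device suspend watchdog: if this device's suspend
         * callbacks have not returned within 12 seconds, dpm_drv_timeout()
         * dumps the suspending task's stack and calls BUG().
         */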
1414         data.dev = dev;
1415         data.tsk = get_current();
1416         init_timer_on_stack(&timer);
1417         timer.expires = jiffies + HZ * 12;
1418         timer.function = dpm_drv_timeout;
1419         timer.data = (unsigned long)&data;
1420         add_timer(&timer);
1421
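        /*
         * If the device may stay runtime-suspended (direct_complete), disable
         * runtime PM and re-check its status: the callbacks below are skipped
         * only if it is still runtime-suspended afterwards, in which case
         * runtime PM stays disabled until device_resume().
         */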
1422         if (dev->power.direct_complete) {
1423                 if (pm_runtime_status_suspended(dev)) {
1424                         pm_runtime_disable(dev);
1425                         if (pm_runtime_status_suspended(dev))
1426                                 goto Complete;
1427
1428                         pm_runtime_enable(dev);
1429                 }
1430                 dev->power.direct_complete = false;
1431         }
1432
1433         dpm_watchdog_set(&wd, dev);
1434         device_lock(dev);
1435
1436         if (dev->pm_domain) {
1437                 info = "power domain ";
1438                 callback = pm_op(&dev->pm_domain->ops, state);
1439                 goto Run;
1440         }
1441
1442         if (dev->type && dev->type->pm) {
1443                 info = "type ";
1444                 callback = pm_op(dev->type->pm, state);
1445                 goto Run;
1446         }
1447
1448         if (dev->class) {
1449                 if (dev->class->pm) {
1450                         info = "class ";
1451                         callback = pm_op(dev->class->pm, state);
1452                         goto Run;
1453                 } else if (dev->class->suspend) {
1454                         pm_dev_dbg(dev, state, "legacy class ");
1455                         error = legacy_suspend(dev, state, dev->class->suspend,
1456                                                 "legacy class ");
1457                         goto End;
1458                 }
1459         }
1460
1461         if (dev->bus) {
1462                 if (dev->bus->pm) {
1463                         info = "bus ";
1464                         callback = pm_op(dev->bus->pm, state);
1465                 } else if (dev->bus->suspend) {
1466                         pm_dev_dbg(dev, state, "legacy bus ");
1467                         error = legacy_suspend(dev, state, dev->bus->suspend,
1468                                                 "legacy bus ");
1469                         goto End;
1470                 }
1471         }
1472
1473  Run:
1474         if (!callback && dev->driver && dev->driver->pm) {
1475                 info = "driver ";
1476                 callback = pm_op(dev->driver->pm, state);
1477         }
1478
1479         error = dpm_run_callback(callback, dev, state, info);
1480
1481  End:
1482         if (!error) {
1483                 struct device *parent = dev->parent;
1484
1485                 dev->power.is_suspended = true;
1486                 if (parent) {
1487                         spin_lock_irq(&parent->power.lock);
1488
1489                         dev->parent->power.direct_complete = false;
1490                         if (dev->power.wakeup_path
1491                             && !dev->parent->power.ignore_children)
1492                                 dev->parent->power.wakeup_path = true;
1493
1494                         spin_unlock_irq(&parent->power.lock);
1495                 }
1496         }
1497
1498         device_unlock(dev);
1499         dpm_watchdog_clear(&wd);
1500
1501         del_timer_sync(&timer);
1502         destroy_timer_on_stack(&timer);
1503
1504  Complete:
1505         complete_all(&dev->power.completion);
1506         if (error)
1507                 async_error = error;
1508
1509         TRACE_SUSPEND(error);
1510         return error;
1511 }
1512
1513 static void async_suspend(void *data, async_cookie_t cookie)
1514 {
1515         struct device *dev = (struct device *)data;
1516         int error;
1517
1518         error = __device_suspend(dev, pm_transition, true);
1519         if (error) {
1520                 dpm_save_failed_dev(dev_name(dev));
1521                 pm_dev_err(dev, pm_transition, " async", error);
1522         }
1523
1524         put_device(dev);
1525 }
1526
1527 static int device_suspend(struct device *dev)
1528 {
1529         reinit_completion(&dev->power.completion);
1530
1531         if (is_async(dev)) {
1532                 get_device(dev);
1533                 async_schedule(async_suspend, dev);
1534                 return 0;
1535         }
1536
1537         return __device_suspend(dev, pm_transition, false);
1538 }
1539
1540 /**
1541  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1542  * @state: PM transition of the system being carried out.
1543  */
1544 int dpm_suspend(pm_message_t state)
1545 {
1546         ktime_t starttime = ktime_get();
1547         int error = 0;
1548
1549         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1550         might_sleep();
1551
1552         cpufreq_suspend();
1553
1554         mutex_lock(&dpm_list_mtx);
1555         pm_transition = state;
1556         async_error = 0;
1557         while (!list_empty(&dpm_prepared_list)) {
1558                 struct device *dev = to_device(dpm_prepared_list.prev);
1559
1560                 get_device(dev);
1561                 mutex_unlock(&dpm_list_mtx);
1562
1563                 error = device_suspend(dev);
1564
1565                 mutex_lock(&dpm_list_mtx);
1566                 if (error) {
1567                         pm_dev_err(dev, state, "", error);
1568                         dpm_save_failed_dev(dev_name(dev));
1569                         put_device(dev);
1570                         break;
1571                 }
1572                 if (!list_empty(&dev->power.entry))
1573                         list_move(&dev->power.entry, &dpm_suspended_list);
1574                 put_device(dev);
1575                 if (async_error)
1576                         break;
1577         }
1578         mutex_unlock(&dpm_list_mtx);
1579         async_synchronize_full();
1580         if (!error)
1581                 error = async_error;
1582         if (error) {
1583                 suspend_stats.failed_suspend++;
1584                 dpm_save_failed_step(SUSPEND_SUSPEND);
1585         } else
1586                 dpm_show_time(starttime, state, NULL);
1587         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1588         return error;
1589 }
1590
1591 /**
1592  * device_prepare - Prepare a device for system power transition.
1593  * @dev: Device to handle.
1594  * @state: PM transition of the system being carried out.
1595  *
1596  * Execute the ->prepare() callback(s) for given device.  No new children of the
1597  * device may be registered after this function has returned.
1598  */
1599 static int device_prepare(struct device *dev, pm_message_t state)
1600 {
1601         int (*callback)(struct device *) = NULL;
1602         char *info = NULL;
1603         int ret = 0;
1604
1605         if (dev->power.syscore)
1606                 return 0;
1607
1608         /*
1609          * If a device's parent goes into runtime suspend at the wrong time,
1610          * it won't be possible to resume the device.  To prevent this we
1611          * block runtime suspend here, during the prepare phase, and allow
1612          * it again during the complete phase.
1613          */
1614         pm_runtime_get_noresume(dev);
1615
1616         device_lock(dev);
1617
1618         dev->power.wakeup_path = device_may_wakeup(dev);
1619
1620         if (dev->pm_domain) {
1621                 info = "preparing power domain ";
1622                 callback = dev->pm_domain->ops.prepare;
1623         } else if (dev->type && dev->type->pm) {
1624                 info = "preparing type ";
1625                 callback = dev->type->pm->prepare;
1626         } else if (dev->class && dev->class->pm) {
1627                 info = "preparing class ";
1628                 callback = dev->class->pm->prepare;
1629         } else if (dev->bus && dev->bus->pm) {
1630                 info = "preparing bus ";
1631                 callback = dev->bus->pm->prepare;
1632         }
1633
1634         if (!callback && dev->driver && dev->driver->pm) {
1635                 info = "preparing driver ";
1636                 callback = dev->driver->pm->prepare;
1637         }
1638
1639         if (callback)
1640                 ret = callback(dev);
1641
1642         device_unlock(dev);
1643
1644         if (ret < 0) {
1645                 suspend_report_result(callback, ret);
1646                 pm_runtime_put(dev);
1647                 return ret;
1648         }
1649         /*
1650          * A positive return value from ->prepare() means "this device appears
1651          * to be runtime-suspended and its state is fine, so if it really is
1652          * runtime-suspended, you can leave it in that state provided that you
1653          * will do the same thing with all of its descendants".  This only
1654          * applies to suspend transitions, however.
1655          */
1656         spin_lock_irq(&dev->power.lock);
1657         dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1658         spin_unlock_irq(&dev->power.lock);
1659         return 0;
1660 }
1661
1662 /**
1663  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1664  * @state: PM transition of the system being carried out.
1665  *
1666  * Execute the ->prepare() callback(s) for all devices.
1667  */
1668 int dpm_prepare(pm_message_t state)
1669 {
1670         int error = 0;
1671
1672         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1673         might_sleep();
1674
1675         mutex_lock(&dpm_list_mtx);
1676         while (!list_empty(&dpm_list)) {
1677                 struct device *dev = to_device(dpm_list.next);
1678
1679                 get_device(dev);
1680                 mutex_unlock(&dpm_list_mtx);
1681
1682                 trace_device_pm_callback_start(dev, "", state.event);
1683                 error = device_prepare(dev, state);
1684                 trace_device_pm_callback_end(dev, error);
1685
1686                 mutex_lock(&dpm_list_mtx);
1687                 if (error) {
1688                         if (error == -EAGAIN) {
1689                                 put_device(dev);
1690                                 error = 0;
1691                                 continue;
1692                         }
1693                         printk(KERN_INFO "PM: Device %s not prepared "
1694                                 "for power transition: code %d\n",
1695                                 dev_name(dev), error);
1696                         put_device(dev);
1697                         break;
1698                 }
1699                 dev->power.is_prepared = true;
1700                 if (!list_empty(&dev->power.entry))
1701                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1702                 put_device(dev);
1703         }
1704         mutex_unlock(&dpm_list_mtx);
1705         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1706         return error;
1707 }
1708
1709 /**
1710  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1711  * @state: PM transition of the system being carried out.
1712  *
1713  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1714  * callbacks for them.
1715  */
1716 int dpm_suspend_start(pm_message_t state)
1717 {
1718         int error;
1719
1720         error = dpm_prepare(state);
1721         if (error) {
1722                 suspend_stats.failed_prepare++;
1723                 dpm_save_failed_step(SUSPEND_PREPARE);
1724         } else
1725                 error = dpm_suspend(state);
1726         return error;
1727 }
1728 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1729
1730 void __suspend_report_result(const char *function, void *fn, int ret)
1731 {
1732         if (ret)
1733                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1734 }
1735 EXPORT_SYMBOL_GPL(__suspend_report_result);
1736
1737 /**
1738  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1739  * @dev: Device to wait for.
1740  * @subordinate: Device that needs to wait for @dev.
1741  */
1742 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1743 {
1744         dpm_wait(dev, subordinate->power.async_suspend);
1745         return async_error;
1746 }
1747 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1748
1749 /**
1750  * dpm_for_each_dev - device iterator.
1751  * @data: data for the callback.
1752  * @fn: function to be called for each device.
1753  *
1754  * Iterate over devices in dpm_list, and call @fn for each device,
1755  * passing it @data.
1756  */
1757 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1758 {
1759         struct device *dev;
1760
1761         if (!fn)
1762                 return;
1763
1764         device_pm_lock();
1765         list_for_each_entry(dev, &dpm_list, power.entry)
1766                 fn(dev, data);
1767         device_pm_unlock();
1768 }
1769 EXPORT_SYMBOL_GPL(dpm_for_each_dev);