/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (pm_print_times_enabled) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
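
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * hypothetical): a driver opts into these lookups by filling in a
 * struct dev_pm_ops.  For PM_EVENT_SUSPEND, pm_op() above would pick
 * foo_suspend; for PM_EVENT_RESUME, foo_resume.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume  = foo_resume,
 *	};
 */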

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while this function is
 * being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * dpm_wd_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog structure address, cast to unsigned long.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so BUG() out for
 * a crash dump.
 */
static void dpm_wd_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;
	struct device *dev	= wd->dev;
	struct task_struct *tsk = wd->tsk;

	dev_emerg(dev, "**** DPM device timeout ****\n");
	show_stack(tsk, NULL);

	BUG();
}

/**
 * dpm_wd_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = get_current();

	init_timer_on_stack(timer);
	timer->expires = jiffies + HZ * 12;
	timer->function = dpm_wd_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_wd_clear - Disable pm watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_wd_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
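
/*
 * Usage sketch (mirrors device_resume() and __device_suspend() below):
 * the watchdog lives on the caller's stack and brackets a potentially
 * slow PM callback, so a wedged driver trips dpm_wd_handler() after
 * roughly 12 seconds instead of hanging the transition silently.
 *
 *	struct dpm_watchdog wd;
 *
 *	dpm_wd_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_wd_clear(&wd);
 */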

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq" resume callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	struct dpm_watchdog wd;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;
	dpm_wd_set(&wd, dev);

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_wd_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
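
/*
 * A device is only handled asynchronously here if its
 * power.async_suspend flag is set.  A driver would normally opt in at
 * probe time (sketch; foo_probe() is hypothetical):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_enable_async_suspend(dev);
 *		...
 *	}
 */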

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
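
/*
 * For example, when dpm_suspend_noirq() fails part-way through a suspend,
 * the devices already handled are unwound with the matching resume verb:
 *
 *	pm_message_t rev = resume_event(PMSG_SUSPEND);
 *
 * leaves rev.event == PM_EVENT_RESUME, and a failed PM_EVENT_HIBERNATE
 * transition similarly unwinds with PMSG_RESTORE.
 */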

/**
 * device_suspend_noirq - Execute a "noirq" suspend callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
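
/*
 * A legacy callback reached through dev->bus->suspend or
 * dev->class->suspend has the old two-argument shape (sketch;
 * foo_legacy_suspend() is hypothetical):
 *
 *	static int foo_legacy_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 */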

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	struct dpm_watchdog wd;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	dpm_wd_set(&wd, dev);

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

	dpm_wd_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
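
/*
 * Usage sketch (names hypothetical): a driver whose device must not be
 * handled before some other device it depends on can wait for that
 * device from its own callback, e.g. during resume:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int ret = device_pm_wait_for_dev(dev, priv->controller);
 *
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */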

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
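
/*
 * Usage sketch (hypothetical caller): count every device currently on
 * dpm_list by passing a counter through @data.
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int count = 0;
 *	dpm_for_each_dev(&count, foo_count_dev);
 */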