/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
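
/*
 * During a system sleep transition the lists above act as stages: dpm_prepare()
 * moves devices from dpm_list to dpm_prepared_list, dpm_suspend() moves them on
 * to dpm_suspended_list, dpm_suspend_late() to dpm_late_early_list and
 * dpm_suspend_noirq() to dpm_noirq_list.  The resume routines walk the devices
 * back through the same stages in reverse.
 */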

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}
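
/*
 * Note that device_pm_sleep_init() leaves the completion in the completed
 * state, so dpm_wait() will not block on a device that has never taken part
 * in a transition; dpm_resume() and device_suspend() reinitialize it with
 * INIT_COMPLETION() at the start of each pass.
 */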

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
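
/*
 * The two helpers above give the suspend and resume paths their ordering:
 * __device_suspend() calls dpm_wait_for_children(), so a device suspends only
 * after all of its children, while device_resume() calls dpm_wait() on the
 * parent, so a device resumes only after its parent.
 */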

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
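
/*
 * Illustrative sketch (not part of this file): a subsystem or driver normally
 * provides these callbacks through a struct dev_pm_ops, e.g.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * where foo_suspend() and foo_resume() are hypothetical driver callbacks.
 * pm_op() would then return foo_suspend for PM_EVENT_SUSPEND and foo_resume
 * for PM_EVENT_RESUME.
 */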

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * dpm_wd_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog address (a struct dpm_watchdog) cast to unsigned long.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so BUG() out for
 * a crash-dump.
 */
static void dpm_wd_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;
        struct device *dev      = wd->dev;
        struct task_struct *tsk = wd->tsk;

        dev_emerg(dev, "**** DPM device timeout ****\n");
        show_stack(tsk, NULL);

        BUG();
}

/**
 * dpm_wd_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = get_current();

        init_timer_on_stack(timer);
        timer->expires = jiffies + HZ * 12;
        timer->function = dpm_wd_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_wd_clear - Disable pm watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_wd_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
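
/*
 * Typical usage, as in device_resume() and __device_suspend() below: declare a
 * struct dpm_watchdog on the stack, arm it with dpm_wd_set() before running a
 * potentially slow suspend/resume callback and disarm it with dpm_wd_clear()
 * afterwards.  If the callback takes longer than the 12 second timeout armed
 * above, dpm_wd_handler() fires and BUG()s the system to capture a crash dump.
 */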

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        struct dpm_watchdog wd;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;
        dpm_wd_set(&wd, dev);

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_wd_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
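
/*
 * pm_async_enabled is the global switch for asynchronous suspend/resume
 * (exposed as /sys/power/pm_async); even with it set, a device is handled
 * asynchronously only if its power.async_suspend flag has been enabled,
 * typically via device_enable_async_suspend().
 */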

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

        cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
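
/*
 * legacy_suspend() serves the old-style bus->suspend() and class->suspend()
 * hooks, which take the pm_message_t directly instead of going through
 * struct dev_pm_ops; __device_suspend() below falls back to them only when
 * no dev_pm_ops callback is available.
 */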

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        struct dpm_watchdog wd;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        dpm_wd_set(&wd, dev);

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

        dpm_wd_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);
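        /* The matching pm_runtime_put() is issued by device_complete(). */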

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);