/* Merge remote-tracking branch 'lsk/v3.10/topic/mm' into linux-linaro-lsk */
/* [firefly-linux-kernel-4.4.55.git] / drivers / base / power / main.c */
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/resume-trace.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/async.h>
30 #include <linux/suspend.h>
31 #include <trace/events/power.h>
32 #include <linux/cpufreq.h>
33 #include <linux/cpuidle.h>
34 #include "../base.h"
35 #include "power.h"
36
37 typedef int (*pm_callback_t)(struct device *);
38
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

/* All registered devices; entries migrate to the phase lists below. */
LIST_HEAD(dpm_list);
/* Devices that have completed ->prepare(). */
static LIST_HEAD(dpm_prepared_list);
/* Devices that have completed ->suspend(). */
static LIST_HEAD(dpm_suspended_list);
/* Devices that have completed ->suspend_late(). */
static LIST_HEAD(dpm_late_early_list);
/* Devices that have completed ->suspend_noirq(). */
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
/* Protects dpm_list and the phase lists above. */
static DEFINE_MUTEX(dpm_list_mtx);
/* The PM transition currently in progress (set in dpm_resume()/suspend). */
static pm_message_t pm_transition;

/* First error reported by an async suspend/resume thread, if any. */
static int async_error;
61 /**
62  * device_pm_sleep_init - Initialize system suspend-related device fields.
63  * @dev: Device object being initialized.
64  */
65 void device_pm_sleep_init(struct device *dev)
66 {
67         dev->power.is_prepared = false;
68         dev->power.is_suspended = false;
69         init_completion(&dev->power.completion);
70         complete_all(&dev->power.completion);
71         dev->power.wakeup = NULL;
72         INIT_LIST_HEAD(&dev->power.entry);
73 }
74
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 *
 * NOTE: must not be called while holding a device lock (see the lock
 * ordering comment above dpm_list).
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
82
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 *
 * Counterpart of device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
90
91 /**
92  * device_pm_add - Add a device to the PM core's list of active devices.
93  * @dev: Device to add to the list.
94  */
95 void device_pm_add(struct device *dev)
96 {
97         pr_debug("PM: Adding info for %s:%s\n",
98                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
99         mutex_lock(&dpm_list_mtx);
100         if (dev->parent && dev->parent->power.is_prepared)
101                 dev_warn(dev, "parent %s should not be sleeping\n",
102                         dev_name(dev->parent));
103         list_add_tail(&dev->power.entry, &dpm_list);
104         mutex_unlock(&dpm_list_mtx);
105 }
106
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
122
123 /**
124  * device_pm_move_before - Move device in the PM core's list of active devices.
125  * @deva: Device to move in dpm_list.
126  * @devb: Device @deva should come before.
127  */
128 void device_pm_move_before(struct device *deva, struct device *devb)
129 {
130         pr_debug("PM: Moving %s:%s before %s:%s\n",
131                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
132                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
133         /* Delete deva from dpm_list and reinsert before devb. */
134         list_move_tail(&deva->power.entry, &devb->power.entry);
135 }
136
137 /**
138  * device_pm_move_after - Move device in the PM core's list of active devices.
139  * @deva: Device to move in dpm_list.
140  * @devb: Device @deva should come after.
141  */
142 void device_pm_move_after(struct device *deva, struct device *devb)
143 {
144         pr_debug("PM: Moving %s:%s after %s:%s\n",
145                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
146                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
147         /* Delete deva from dpm_list and reinsert after devb. */
148         list_move(&deva->power.entry, &devb->power.entry);
149 }
150
151 /**
152  * device_pm_move_last - Move device to end of the PM core's list of devices.
153  * @dev: Device to move in dpm_list.
154  */
155 void device_pm_move_last(struct device *dev)
156 {
157         pr_debug("PM: Moving %s:%s to end of list\n",
158                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
159         list_move_tail(&dev->power.entry, &dpm_list);
160 }
161
162 static ktime_t initcall_debug_start(struct device *dev)
163 {
164         ktime_t calltime = ktime_set(0, 0);
165
166         if (pm_print_times_enabled) {
167                 pr_info("calling  %s+ @ %i, parent: %s\n",
168                         dev_name(dev), task_pid_nr(current),
169                         dev->parent ? dev_name(dev->parent) : "none");
170                 calltime = ktime_get();
171         }
172
173         return calltime;
174 }
175
176 static void initcall_debug_report(struct device *dev, ktime_t calltime,
177                                   int error)
178 {
179         ktime_t delta, rettime;
180
181         if (pm_print_times_enabled) {
182                 rettime = ktime_get();
183                 delta = ktime_sub(rettime, calltime);
184                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
185                         error, (unsigned long long)ktime_to_ns(delta) >> 10);
186         }
187 }
188
189 /**
190  * dpm_wait - Wait for a PM operation to complete.
191  * @dev: Device to wait for.
192  * @async: If unset, wait only if the device's power.async_suspend flag is set.
193  */
194 static void dpm_wait(struct device *dev, bool async)
195 {
196         if (!dev)
197                 return;
198
199         if (async || (pm_async_enabled && dev->power.async_suspend))
200                 wait_for_completion(&dev->power.completion);
201 }
202
203 static int dpm_wait_fn(struct device *dev, void *async_ptr)
204 {
205         dpm_wait(dev, *((bool *)async_ptr));
206         return 0;
207 }
208
209 static void dpm_wait_for_children(struct device *dev, bool async)
210 {
211        device_for_each_child(dev, &async, dpm_wait_fn);
212 }
213
214 /**
215  * pm_op - Return the PM operation appropriate for given PM event.
216  * @ops: PM operations to choose from.
217  * @state: PM transition of the system being carried out.
218  */
219 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
220 {
221         switch (state.event) {
222 #ifdef CONFIG_SUSPEND
223         case PM_EVENT_SUSPEND:
224                 return ops->suspend;
225         case PM_EVENT_RESUME:
226                 return ops->resume;
227 #endif /* CONFIG_SUSPEND */
228 #ifdef CONFIG_HIBERNATE_CALLBACKS
229         case PM_EVENT_FREEZE:
230         case PM_EVENT_QUIESCE:
231                 return ops->freeze;
232         case PM_EVENT_HIBERNATE:
233                 return ops->poweroff;
234         case PM_EVENT_THAW:
235         case PM_EVENT_RECOVER:
236                 return ops->thaw;
237                 break;
238         case PM_EVENT_RESTORE:
239                 return ops->restore;
240 #endif /* CONFIG_HIBERNATE_CALLBACKS */
241         }
242
243         return NULL;
244 }
245
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "late suspend"/"early resume" phase callback from @ops.
 * Returns NULL if @ops has no callback for @state (or the event is not
 * handled with the current kernel configuration).
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
279
280 /**
281  * pm_noirq_op - Return the PM operation appropriate for given PM event.
282  * @ops: PM operations to choose from.
283  * @state: PM transition of the system being carried out.
284  *
285  * The driver of @dev will not receive interrupts while this function is being
286  * executed.
287  */
288 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
289 {
290         switch (state.event) {
291 #ifdef CONFIG_SUSPEND
292         case PM_EVENT_SUSPEND:
293                 return ops->suspend_noirq;
294         case PM_EVENT_RESUME:
295                 return ops->resume_noirq;
296 #endif /* CONFIG_SUSPEND */
297 #ifdef CONFIG_HIBERNATE_CALLBACKS
298         case PM_EVENT_FREEZE:
299         case PM_EVENT_QUIESCE:
300                 return ops->freeze_noirq;
301         case PM_EVENT_HIBERNATE:
302                 return ops->poweroff_noirq;
303         case PM_EVENT_THAW:
304         case PM_EVENT_RECOVER:
305                 return ops->thaw_noirq;
306         case PM_EVENT_RESTORE:
307                 return ops->restore_noirq;
308 #endif /* CONFIG_HIBERNATE_CALLBACKS */
309         }
310
311         return NULL;
312 }
313
314 static char *pm_verb(int event)
315 {
316         switch (event) {
317         case PM_EVENT_SUSPEND:
318                 return "suspend";
319         case PM_EVENT_RESUME:
320                 return "resume";
321         case PM_EVENT_FREEZE:
322                 return "freeze";
323         case PM_EVENT_QUIESCE:
324                 return "quiesce";
325         case PM_EVENT_HIBERNATE:
326                 return "hibernate";
327         case PM_EVENT_THAW:
328                 return "thaw";
329         case PM_EVENT_RESTORE:
330                 return "restore";
331         case PM_EVENT_RECOVER:
332                 return "recover";
333         default:
334                 return "(unknown PM event)";
335         }
336 }
337
338 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
339 {
340         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
341                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
342                 ", may wakeup" : "");
343 }
344
345 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
346                         int error)
347 {
348         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
349                 dev_name(dev), pm_verb(state.event), info, error);
350 }
351
/* Log how long the given PM phase took, measured from @starttime to now. */
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	/* do_div() divides usecs64 in place: ns -> us. */
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Round up to 1 us so the log never reads "0.000 msecs". */
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
368
/*
 * dpm_run_callback - Invoke a single device PM callback with tracing.
 * @cb: Callback to run; NULL is a no-op that returns 0.
 * @dev: Device the callback operates on.
 * @state: PM transition being carried out (used for logging only).
 * @info: Log prefix describing where the callback came from.
 *
 * Returns the callback's return value (0 on success).
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	/* Record the failing callback address for suspend diagnostics. */
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
388
389 /*------------------------- Resume routines -------------------------*/
390
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* power.syscore devices are not handled by the regular PM paths. */
	if (dev->power.syscore)
		goto Out;

	/*
	 * Callback selection priority: PM domain, then device type, then
	 * class, then bus; the driver's own ops are only a fallback.
	 */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}
436
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		/*
		 * Pin the device and move it to the next-phase list before
		 * dropping the lock, so the same entry is never picked twice.
		 */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		/* Callbacks may sleep; the list mutex must not be held here. */
		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	/* All noirq callbacks done: re-enable device IRQs and cpuidle. */
	resume_device_irqs();
	cpuidle_resume();
}
473
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* power.syscore devices are not handled by the regular PM paths. */
	if (dev->power.syscore)
		goto Out;

	/* Same priority order as the other phases: domain/type/class/bus. */
	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	/* Balances the __pm_runtime_disable() done in device_suspend_late(). */
	pm_runtime_enable(dev);
	return error;
}
520
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		/* Pin the device and advance it to the next phase list first. */
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}
552
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 *
 * Runs the two pre-"resume" phases back to back, in that order.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
563
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* power.syscore devices are skipped, but waiters must still be woken. */
	if (dev->power.syscore)
		goto Complete;

	/* Parents must finish resuming before their children start. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	/* Nothing to do if the suspend phase never ran for this device. */
	if (!dev->power.is_suspended)
		goto Unlock;

	/* Callback priority: domain > type > class > bus > driver. */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			/* Legacy callbacks bypass the driver fallback below. */
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	/* Wake anything in dpm_wait() blocked on this device. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
649
650 static void async_resume(void *data, async_cookie_t cookie)
651 {
652         struct device *dev = (struct device *)data;
653         int error;
654
655         error = device_resume(dev, pm_transition, true);
656         if (error)
657                 pm_dev_err(dev, pm_transition, " async", error);
658         put_device(dev);
659 }
660
661 static bool is_async(struct device *dev)
662 {
663         return dev->power.async_suspend && pm_async_enabled
664                 && !pm_trace_is_enabled();
665 }
666
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * First pass: re-arm every completion and kick off async-capable
	 * devices so they can resume in parallel with the sync walk below.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		/* Entry may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async_resume() threads before declaring completion. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
}
721
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	/* power.syscore devices are not handled by the regular PM paths. */
	if (dev->power.syscore)
		return;

	device_lock(dev);

	/* Callback priority: domain > type > class > bus > driver. */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/* Balances the pm_runtime_get_noresume() done in the prepare phase. */
	pm_runtime_put(dev);
}
765
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list in reverse (children before parents). */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		/* Pin the device and park it on a local list before unlocking. */
		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Return every completed device to the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
797
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
811
812
813 /*------------------------- Suspend routines -------------------------*/
814
815 /**
816  * resume_event - Return a "resume" message for given "suspend" sleep state.
817  * @sleep_state: PM message representing a sleep state.
818  *
819  * Return a PM message representing the resume event corresponding to given
820  * sleep state.
821  */
822 static pm_message_t resume_event(pm_message_t sleep_state)
823 {
824         switch (sleep_state.event) {
825         case PM_EVENT_SUSPEND:
826                 return PMSG_RESUME;
827         case PM_EVENT_FREEZE:
828         case PM_EVENT_QUIESCE:
829                 return PMSG_RECOVER;
830         case PM_EVENT_HIBERNATE:
831                 return PMSG_RESTORE;
832         }
833         return PMSG_ON;
834 }
835
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	/* power.syscore devices are not handled by the regular PM paths. */
	if (dev->power.syscore)
		return 0;

	/* Callback priority: domain > type > class > bus > driver. */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
873
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_late_early_list in reverse (children before parents). */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			/* One failure aborts the whole phase. */
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* Entry may have been removed while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		/* A pending wakeup event cancels the transition. */
		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Roll back the devices already suspended in this phase. */
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
922
923 /**
924  * device_suspend_late - Execute a "late suspend" callback for given device.
925  * @dev: Device to handle.
926  * @state: PM transition of the system being carried out.
927  *
928  * Runtime PM is disabled for @dev while this function is being executed.
929  */
930 static int device_suspend_late(struct device *dev, pm_message_t state)
931 {
932         pm_callback_t callback = NULL;
933         char *info = NULL;
934
935         __pm_runtime_disable(dev, false);
936
937         if (dev->power.syscore)
938                 return 0;
939
940         if (dev->pm_domain) {
941                 info = "late power domain ";
942                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
943         } else if (dev->type && dev->type->pm) {
944                 info = "late type ";
945                 callback = pm_late_early_op(dev->type->pm, state);
946         } else if (dev->class && dev->class->pm) {
947                 info = "late class ";
948                 callback = pm_late_early_op(dev->class->pm, state);
949         } else if (dev->bus && dev->bus->pm) {
950                 info = "late bus ";
951                 callback = pm_late_early_op(dev->bus->pm, state);
952         }
953
954         if (!callback && dev->driver && dev->driver->pm) {
955                 info = "late driver ";
956                 callback = pm_late_early_op(dev->driver->pm, state);
957         }
958
959         return dpm_run_callback(callback, dev, state, info);
960 }
961
962 /**
963  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
964  * @state: PM transition of the system being carried out.
965  */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		/* Walk from the tail: children are suspended before parents. */
		struct device *dev = to_device(dpm_suspended_list.prev);

		/*
		 * Pin the device and drop the list lock around the callback:
		 * device_suspend_late() may sleep and must not be called
		 * with dpm_list_mtx held.
		 */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			/* Record the failing step/device for suspend_stats. */
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/*
		 * The device may have been removed from the PM lists while
		 * the lock was dropped; only move it if it is still queued.
		 */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		/* Abort the transition if a wakeup event came in meanwhile. */
		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		/* Roll back devices already suspended in this phase. */
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}
1006
1007 /**
1008  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1009  * @state: PM transition of the system being carried out.
1010  */
1011 int dpm_suspend_end(pm_message_t state)
1012 {
1013         int error = dpm_suspend_late(state);
1014         if (error)
1015                 return error;
1016
1017         error = dpm_suspend_noirq(state);
1018         if (error) {
1019                 dpm_resume_early(resume_event(state));
1020                 return error;
1021         }
1022
1023         return 0;
1024 }
1025 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1026
1027 /**
1028  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1029  * @dev: Device to suspend.
1030  * @state: PM transition of the system being carried out.
1031  * @cb: Suspend callback to execute.
1032  */
1033 static int legacy_suspend(struct device *dev, pm_message_t state,
1034                           int (*cb)(struct device *dev, pm_message_t state))
1035 {
1036         int error;
1037         ktime_t calltime;
1038
1039         calltime = initcall_debug_start(dev);
1040
1041         error = cb(dev, state);
1042         suspend_report_result(cb, error);
1043
1044         initcall_debug_report(dev, calltime, error);
1045
1046         return error;
1047 }
1048
1049 /**
1050  * device_suspend - Execute "suspend" callbacks for given device.
1051  * @dev: Device to handle.
1052  * @state: PM transition of the system being carried out.
1053  * @async: If true, the device is being suspended asynchronously.
1054  */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	/* A parent must not suspend before all of its children have done so. */
	dpm_wait_for_children(dev, async);

	/* Another async suspend already failed - don't start a new one. */
	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	/* Syscore devices are handled outside of the regular PM flow. */
	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	/*
	 * Pick the callback provider in order of decreasing precedence:
	 * PM domain, device type, class, bus.  The first provider present
	 * wins even if pm_op() yields no callback for this transition.
	 */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			/* Legacy class suspend takes pm_message_t directly. */
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			/* Legacy bus suspend takes pm_message_t directly. */
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	/* Fall back to the driver's own callbacks as a last resort. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		/*
		 * Propagate the wakeup path up the tree so ancestors know a
		 * descendant may need to stay wakeup-capable (unless the
		 * parent explicitly ignores its children).
		 */
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	/* Wake up anyone waiting on this device (dpm_wait_for_children()). */
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}
1145
1146 static void async_suspend(void *data, async_cookie_t cookie)
1147 {
1148         struct device *dev = (struct device *)data;
1149         int error;
1150
1151         error = __device_suspend(dev, pm_transition, true);
1152         if (error) {
1153                 dpm_save_failed_dev(dev_name(dev));
1154                 pm_dev_err(dev, pm_transition, " async", error);
1155         }
1156
1157         put_device(dev);
1158 }
1159
1160 static int device_suspend(struct device *dev)
1161 {
1162         INIT_COMPLETION(dev->power.completion);
1163
1164         if (pm_async_enabled && dev->power.async_suspend) {
1165                 get_device(dev);
1166                 async_schedule(async_suspend, dev);
1167                 return 0;
1168         }
1169
1170         return __device_suspend(dev, pm_transition, false);
1171 }
1172
1173 /**
1174  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1175  * @state: PM transition of the system being carried out.
1176  */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		/* Walk from the tail: children are suspended before parents. */
		struct device *dev = to_device(dpm_prepared_list.prev);

		/*
		 * Pin the device and drop the list lock around the callback:
		 * device_suspend() may sleep and must not be called with
		 * dpm_list_mtx held.
		 */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/*
		 * The device may have been removed from the PM lists while
		 * the lock was dropped; only move it if it is still queued.
		 */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop submitting work if an async suspend already failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async suspends before inspecting async_error. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}
1221
1222 /**
1223  * device_prepare - Prepare a device for system power transition.
1224  * @dev: Device to handle.
1225  * @state: PM transition of the system being carried out.
1226  *
1227  * Execute the ->prepare() callback(s) for given device.  No new children of the
1228  * device may be registered after this function has returned.
1229  */
1230 static int device_prepare(struct device *dev, pm_message_t state)
1231 {
1232         int (*callback)(struct device *) = NULL;
1233         char *info = NULL;
1234         int error = 0;
1235
1236         if (dev->power.syscore)
1237                 return 0;
1238
1239         /*
1240          * If a device's parent goes into runtime suspend at the wrong time,
1241          * it won't be possible to resume the device.  To prevent this we
1242          * block runtime suspend here, during the prepare phase, and allow
1243          * it again during the complete phase.
1244          */
1245         pm_runtime_get_noresume(dev);
1246
1247         device_lock(dev);
1248
1249         dev->power.wakeup_path = device_may_wakeup(dev);
1250
1251         if (dev->pm_domain) {
1252                 info = "preparing power domain ";
1253                 callback = dev->pm_domain->ops.prepare;
1254         } else if (dev->type && dev->type->pm) {
1255                 info = "preparing type ";
1256                 callback = dev->type->pm->prepare;
1257         } else if (dev->class && dev->class->pm) {
1258                 info = "preparing class ";
1259                 callback = dev->class->pm->prepare;
1260         } else if (dev->bus && dev->bus->pm) {
1261                 info = "preparing bus ";
1262                 callback = dev->bus->pm->prepare;
1263         }
1264
1265         if (!callback && dev->driver && dev->driver->pm) {
1266                 info = "preparing driver ";
1267                 callback = dev->driver->pm->prepare;
1268         }
1269
1270         if (callback) {
1271                 error = callback(dev);
1272                 suspend_report_result(callback, error);
1273         }
1274
1275         device_unlock(dev);
1276
1277         return error;
1278 }
1279
1280 /**
1281  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1282  * @state: PM transition of the system being carried out.
1283  *
1284  * Execute the ->prepare() callback(s) for all devices.
1285  */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		/* Walk from the head: parents are prepared before children. */
		struct device *dev = to_device(dpm_list.next);

		/*
		 * Pin the device and drop the list lock around the callback:
		 * device_prepare() may sleep and must not be called with
		 * dpm_list_mtx held.
		 */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			/* -EAGAIN means "skip this device", not "abort". */
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		/*
		 * The device may have been removed from dpm_list while the
		 * lock was dropped; only move it if it is still queued.
		 */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
1322
1323 /**
1324  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1325  * @state: PM transition of the system being carried out.
1326  *
1327  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1328  * callbacks for them.
1329  */
1330 int dpm_suspend_start(pm_message_t state)
1331 {
1332         int error;
1333
1334         error = dpm_prepare(state);
1335         if (error) {
1336                 suspend_stats.failed_prepare++;
1337                 dpm_save_failed_step(SUSPEND_PREPARE);
1338         } else
1339                 error = dpm_suspend(state);
1340         return error;
1341 }
1342 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1343
1344 void __suspend_report_result(const char *function, void *fn, int ret)
1345 {
1346         if (ret)
1347                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1348 }
1349 EXPORT_SYMBOL_GPL(__suspend_report_result);
1350
1351 /**
1352  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1353  * @dev: Device to wait for.
1354  * @subordinate: Device that needs to wait for @dev.
1355  */
1356 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1357 {
1358         dpm_wait(dev, subordinate->power.async_suspend);
1359         return async_error;
1360 }
1361 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1362
1363 /**
1364  * dpm_for_each_dev - device iterator.
1365  * @data: data for the callback.
1366  * @fn: function to be called for each device.
1367  *
1368  * Iterate over devices in dpm_list, and call @fn for each device,
1369  * passing it @data.
1370  */
1371 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1372 {
1373         struct device *dev;
1374
1375         if (!fn)
1376                 return;
1377
1378         device_pm_lock();
1379         list_for_each_entry(dev, &dpm_list, power.entry)
1380                 fn(dev, data);
1381         device_pm_unlock();
1382 }
1383 EXPORT_SYMBOL_GPL(dpm_for_each_dev);