079024a40a9da7093cc8bdfafb4707245a6dd3e5
[firefly-linux-kernel-4.4.55.git] / drivers / base / power / main.c
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/rwsem.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/console.h>

#include "../base.h"
#include "power.h"
32
33 /*
34  * The entries in the dpm_list list are in a depth first order, simply
35  * because children are guaranteed to be discovered after parents, and
36  * are inserted at the back of the list on discovery.
37  *
38  * Since device_pm_add() may be called with a device semaphore held,
39  * we must never try to acquire a device semaphore while holding
40  * dpm_list_mutex.
41  */
42
43 LIST_HEAD(dpm_list);
44
45 static DEFINE_MUTEX(dpm_list_mtx);
46
47 static void dpm_drv_timeout(unsigned long data);
48 static DEFINE_TIMER(dpm_drv_wd, dpm_drv_timeout, 0, 0);
49 static struct {
50         struct device *dev;
51         struct task_struct *tsk;
52 } dpm_drv_wd_data;
53
54 /*
55  * Set once the preparation of devices for a PM transition has started, reset
56  * before starting to resume devices.  Protected by dpm_list_mtx.
57  */
58 static bool transition_started;
59
60 /**
61  * device_pm_init - Initialize the PM-related part of a device object.
62  * @dev: Device object being initialized.
63  */
64 void device_pm_init(struct device *dev)
65 {
66         dev->power.status = DPM_ON;
67         pm_runtime_init(dev);
68 }
69
70 /**
71  * device_pm_lock - Lock the list of active devices used by the PM core.
72  */
73 void device_pm_lock(void)
74 {
75         mutex_lock(&dpm_list_mtx);
76 }
77
78 /**
79  * device_pm_unlock - Unlock the list of active devices used by the PM core.
80  */
81 void device_pm_unlock(void)
82 {
83         mutex_unlock(&dpm_list_mtx);
84 }
85
86 /**
87  * device_pm_add - Add a device to the PM core's list of active devices.
88  * @dev: Device to add to the list.
89  */
90 void device_pm_add(struct device *dev)
91 {
92         pr_debug("PM: Adding info for %s:%s\n",
93                  dev->bus ? dev->bus->name : "No Bus",
94                  kobject_name(&dev->kobj));
95         mutex_lock(&dpm_list_mtx);
96         if (dev->parent) {
97                 if (dev->parent->power.status >= DPM_SUSPENDING)
98                         dev_warn(dev, "parent %s should not be sleeping\n",
99                                  dev_name(dev->parent));
100         } else if (transition_started) {
101                 /*
102                  * We refuse to register parentless devices while a PM
103                  * transition is in progress in order to avoid leaving them
104                  * unhandled down the road
105                  */
106                 dev_WARN(dev, "Parentless device registered during a PM transaction\n");
107         }
108
109         list_add_tail(&dev->power.entry, &dpm_list);
110         mutex_unlock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_remove - Remove a device from the PM core's list of active devices.
115  * @dev: Device to be removed from the list.
116  */
117 void device_pm_remove(struct device *dev)
118 {
119         pr_debug("PM: Removing info for %s:%s\n",
120                  dev->bus ? dev->bus->name : "No Bus",
121                  kobject_name(&dev->kobj));
122         mutex_lock(&dpm_list_mtx);
123         list_del_init(&dev->power.entry);
124         mutex_unlock(&dpm_list_mtx);
125         pm_runtime_remove(dev);
126 }
127
128 /**
129  * device_pm_move_before - Move device in the PM core's list of active devices.
130  * @deva: Device to move in dpm_list.
131  * @devb: Device @deva should come before.
132  */
133 void device_pm_move_before(struct device *deva, struct device *devb)
134 {
135         pr_debug("PM: Moving %s:%s before %s:%s\n",
136                  deva->bus ? deva->bus->name : "No Bus",
137                  kobject_name(&deva->kobj),
138                  devb->bus ? devb->bus->name : "No Bus",
139                  kobject_name(&devb->kobj));
140         /* Delete deva from dpm_list and reinsert before devb. */
141         list_move_tail(&deva->power.entry, &devb->power.entry);
142 }
143
144 /**
145  * device_pm_move_after - Move device in the PM core's list of active devices.
146  * @deva: Device to move in dpm_list.
147  * @devb: Device @deva should come after.
148  */
149 void device_pm_move_after(struct device *deva, struct device *devb)
150 {
151         pr_debug("PM: Moving %s:%s after %s:%s\n",
152                  deva->bus ? deva->bus->name : "No Bus",
153                  kobject_name(&deva->kobj),
154                  devb->bus ? devb->bus->name : "No Bus",
155                  kobject_name(&devb->kobj));
156         /* Delete deva from dpm_list and reinsert after devb. */
157         list_move(&deva->power.entry, &devb->power.entry);
158 }
159
160 /**
161  * device_pm_move_last - Move device to end of the PM core's list of devices.
162  * @dev: Device to move in dpm_list.
163  */
164 void device_pm_move_last(struct device *dev)
165 {
166         pr_debug("PM: Moving %s:%s to end of list\n",
167                  dev->bus ? dev->bus->name : "No Bus",
168                  kobject_name(&dev->kobj));
169         list_move_tail(&dev->power.entry, &dpm_list);
170 }
171
172 /**
173  * pm_op - Execute the PM operation appropriate for given PM event.
174  * @dev: Device to handle.
175  * @ops: PM operations to choose from.
176  * @state: PM transition of the system being carried out.
177  */
178 static int pm_op(struct device *dev,
179                  const struct dev_pm_ops *ops,
180                  pm_message_t state)
181 {
182         int error = 0;
183
184         switch (state.event) {
185 #ifdef CONFIG_SUSPEND
186         case PM_EVENT_SUSPEND:
187                 if (ops->suspend) {
188                         error = ops->suspend(dev);
189                         suspend_report_result(ops->suspend, error);
190                 }
191                 break;
192         case PM_EVENT_RESUME:
193                 if (ops->resume) {
194                         error = ops->resume(dev);
195                         suspend_report_result(ops->resume, error);
196                 }
197                 break;
198 #endif /* CONFIG_SUSPEND */
199 #ifdef CONFIG_HIBERNATION
200         case PM_EVENT_FREEZE:
201         case PM_EVENT_QUIESCE:
202                 if (ops->freeze) {
203                         error = ops->freeze(dev);
204                         suspend_report_result(ops->freeze, error);
205                 }
206                 break;
207         case PM_EVENT_HIBERNATE:
208                 if (ops->poweroff) {
209                         error = ops->poweroff(dev);
210                         suspend_report_result(ops->poweroff, error);
211                 }
212                 break;
213         case PM_EVENT_THAW:
214         case PM_EVENT_RECOVER:
215                 if (ops->thaw) {
216                         error = ops->thaw(dev);
217                         suspend_report_result(ops->thaw, error);
218                 }
219                 break;
220         case PM_EVENT_RESTORE:
221                 if (ops->restore) {
222                         error = ops->restore(dev);
223                         suspend_report_result(ops->restore, error);
224                 }
225                 break;
226 #endif /* CONFIG_HIBERNATION */
227         default:
228                 error = -EINVAL;
229         }
230         return error;
231 }
232
233 /**
234  * pm_noirq_op - Execute the PM operation appropriate for given PM event.
235  * @dev: Device to handle.
236  * @ops: PM operations to choose from.
237  * @state: PM transition of the system being carried out.
238  *
239  * The driver of @dev will not receive interrupts while this function is being
240  * executed.
241  */
242 static int pm_noirq_op(struct device *dev,
243                         const struct dev_pm_ops *ops,
244                         pm_message_t state)
245 {
246         int error = 0;
247
248         switch (state.event) {
249 #ifdef CONFIG_SUSPEND
250         case PM_EVENT_SUSPEND:
251                 if (ops->suspend_noirq) {
252                         error = ops->suspend_noirq(dev);
253                         suspend_report_result(ops->suspend_noirq, error);
254                 }
255                 break;
256         case PM_EVENT_RESUME:
257                 if (ops->resume_noirq) {
258                         error = ops->resume_noirq(dev);
259                         suspend_report_result(ops->resume_noirq, error);
260                 }
261                 break;
262 #endif /* CONFIG_SUSPEND */
263 #ifdef CONFIG_HIBERNATION
264         case PM_EVENT_FREEZE:
265         case PM_EVENT_QUIESCE:
266                 if (ops->freeze_noirq) {
267                         error = ops->freeze_noirq(dev);
268                         suspend_report_result(ops->freeze_noirq, error);
269                 }
270                 break;
271         case PM_EVENT_HIBERNATE:
272                 if (ops->poweroff_noirq) {
273                         error = ops->poweroff_noirq(dev);
274                         suspend_report_result(ops->poweroff_noirq, error);
275                 }
276                 break;
277         case PM_EVENT_THAW:
278         case PM_EVENT_RECOVER:
279                 if (ops->thaw_noirq) {
280                         error = ops->thaw_noirq(dev);
281                         suspend_report_result(ops->thaw_noirq, error);
282                 }
283                 break;
284         case PM_EVENT_RESTORE:
285                 if (ops->restore_noirq) {
286                         error = ops->restore_noirq(dev);
287                         suspend_report_result(ops->restore_noirq, error);
288                 }
289                 break;
290 #endif /* CONFIG_HIBERNATION */
291         default:
292                 error = -EINVAL;
293         }
294         return error;
295 }
296
297 static char *pm_verb(int event)
298 {
299         switch (event) {
300         case PM_EVENT_SUSPEND:
301                 return "suspend";
302         case PM_EVENT_RESUME:
303                 return "resume";
304         case PM_EVENT_FREEZE:
305                 return "freeze";
306         case PM_EVENT_QUIESCE:
307                 return "quiesce";
308         case PM_EVENT_HIBERNATE:
309                 return "hibernate";
310         case PM_EVENT_THAW:
311                 return "thaw";
312         case PM_EVENT_RESTORE:
313                 return "restore";
314         case PM_EVENT_RECOVER:
315                 return "recover";
316         default:
317                 return "(unknown PM event)";
318         }
319 }
320
321 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
322 {
323         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
324                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
325                 ", may wakeup" : "");
326 }
327
328 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
329                         int error)
330 {
331         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
332                 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
333 }
334
335 /*------------------------- Resume routines -------------------------*/
336
337 /**
338  * device_resume_noirq - Execute an "early resume" callback for given device.
339  * @dev: Device to handle.
340  * @state: PM transition of the system being carried out.
341  *
342  * The driver of @dev will not receive interrupts while this function is being
343  * executed.
344  */
345 static int device_resume_noirq(struct device *dev, pm_message_t state)
346 {
347         int error = 0;
348
349         TRACE_DEVICE(dev);
350         TRACE_RESUME(0);
351
352         if (!dev->bus)
353                 goto End;
354
355         if (dev->bus->pm) {
356                 pm_dev_dbg(dev, state, "EARLY ");
357                 error = pm_noirq_op(dev, dev->bus->pm, state);
358         }
359  End:
360         TRACE_RESUME(error);
361         return error;
362 }
363
364 /**
365  * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
366  * @state: PM transition of the system being carried out.
367  *
368  * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
369  * enable device drivers to receive interrupts.
370  */
371 void dpm_resume_noirq(pm_message_t state)
372 {
373         struct device *dev;
374
375         mutex_lock(&dpm_list_mtx);
376         transition_started = false;
377         list_for_each_entry(dev, &dpm_list, power.entry)
378                 if (dev->power.status > DPM_OFF) {
379                         int error;
380
381                         dev->power.status = DPM_OFF;
382                         error = device_resume_noirq(dev, state);
383                         if (error)
384                                 pm_dev_err(dev, state, " early", error);
385                 }
386         mutex_unlock(&dpm_list_mtx);
387         resume_device_irqs();
388 }
389 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
390
391 /**
392  * device_resume - Execute "resume" callbacks for given device.
393  * @dev: Device to handle.
394  * @state: PM transition of the system being carried out.
395  */
396 static int device_resume(struct device *dev, pm_message_t state)
397 {
398         int error = 0;
399
400         TRACE_DEVICE(dev);
401         TRACE_RESUME(0);
402
403         down(&dev->sem);
404
405         if (dev->bus) {
406                 if (dev->bus->pm) {
407                         pm_dev_dbg(dev, state, "");
408                         error = pm_op(dev, dev->bus->pm, state);
409                 } else if (dev->bus->resume) {
410                         pm_dev_dbg(dev, state, "legacy ");
411                         error = dev->bus->resume(dev);
412                 }
413                 if (error)
414                         goto End;
415         }
416
417         if (dev->type) {
418                 if (dev->type->pm) {
419                         pm_dev_dbg(dev, state, "type ");
420                         error = pm_op(dev, dev->type->pm, state);
421                 }
422                 if (error)
423                         goto End;
424         }
425
426         if (dev->class) {
427                 if (dev->class->pm) {
428                         pm_dev_dbg(dev, state, "class ");
429                         error = pm_op(dev, dev->class->pm, state);
430                 } else if (dev->class->resume) {
431                         pm_dev_dbg(dev, state, "legacy class ");
432                         error = dev->class->resume(dev);
433                 }
434         }
435  End:
436         up(&dev->sem);
437
438         TRACE_RESUME(error);
439         return error;
440 }
441
442 /**
443  *      dpm_drv_timeout - Driver suspend / resume watchdog handler
444  *      @data: struct device which timed out
445  *
446  *      Called when a driver has timed out suspending or resuming.
447  *      There's not much we can do here to recover so
448  *      BUG() out for a crash-dump
449  *
450  */
451 static void dpm_drv_timeout(unsigned long data)
452 {
453         struct device *dev = dpm_drv_wd_data.dev;
454         struct task_struct *tsk = dpm_drv_wd_data.tsk;
455
456 #ifdef CONFIG_ARCH_RK29
457 #include <linux/console.h>
458         resume_console();
459 #endif
460
461         printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
462                (dev->driver ? dev->driver->name : "no driver"));
463
464         printk(KERN_EMERG "dpm suspend stack:\n");
465         show_stack(tsk, NULL);
466
467         BUG();
468 }
469
470 /**
471  *      dpm_drv_wdset - Sets up driver suspend/resume watchdog timer.
472  *      @dev: struct device which we're guarding.
473  *
474  */
475 static void dpm_drv_wdset(struct device *dev)
476 {
477         dpm_drv_wd_data.dev = dev;
478         dpm_drv_wd_data.tsk = get_current();
479         dpm_drv_wd.data = (unsigned long) &dpm_drv_wd_data;
480         mod_timer(&dpm_drv_wd, jiffies + (HZ * 3));
481 }
482
483 /**
484  *      dpm_drv_wdclr - clears driver suspend/resume watchdog timer.
485  *      @dev: struct device which we're no longer guarding.
486  *
487  */
488 static void dpm_drv_wdclr(struct device *dev)
489 {
490         del_timer_sync(&dpm_drv_wd);
491 }
492
493 /**
494  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
495  * @state: PM transition of the system being carried out.
496  *
497  * Execute the appropriate "resume" callback for all devices whose status
498  * indicates that they are suspended.
499  */
500 static void dpm_resume(pm_message_t state)
501 {
502         struct list_head list;
503
504         INIT_LIST_HEAD(&list);
505         mutex_lock(&dpm_list_mtx);
506         while (!list_empty(&dpm_list)) {
507                 struct device *dev = to_device(dpm_list.next);
508
509                 get_device(dev);
510                 if (dev->power.status >= DPM_OFF) {
511                         int error;
512
513                         dev->power.status = DPM_RESUMING;
514                         mutex_unlock(&dpm_list_mtx);
515
516                         error = device_resume(dev, state);
517
518                         mutex_lock(&dpm_list_mtx);
519                         if (error)
520                                 pm_dev_err(dev, state, "", error);
521                 } else if (dev->power.status == DPM_SUSPENDING) {
522                         /* Allow new children of the device to be registered */
523                         dev->power.status = DPM_RESUMING;
524                 }
525                 if (!list_empty(&dev->power.entry))
526                         list_move_tail(&dev->power.entry, &list);
527                 put_device(dev);
528         }
529         list_splice(&list, &dpm_list);
530         mutex_unlock(&dpm_list_mtx);
531 }
532
533 /**
534  * device_complete - Complete a PM transition for given device.
535  * @dev: Device to handle.
536  * @state: PM transition of the system being carried out.
537  */
538 static void device_complete(struct device *dev, pm_message_t state)
539 {
540         down(&dev->sem);
541
542         if (dev->class && dev->class->pm && dev->class->pm->complete) {
543                 pm_dev_dbg(dev, state, "completing class ");
544                 dev->class->pm->complete(dev);
545         }
546
547         if (dev->type && dev->type->pm && dev->type->pm->complete) {
548                 pm_dev_dbg(dev, state, "completing type ");
549                 dev->type->pm->complete(dev);
550         }
551
552         if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
553                 pm_dev_dbg(dev, state, "completing ");
554                 dev->bus->pm->complete(dev);
555         }
556
557         up(&dev->sem);
558 }
559
560 /**
561  * dpm_complete - Complete a PM transition for all non-sysdev devices.
562  * @state: PM transition of the system being carried out.
563  *
564  * Execute the ->complete() callbacks for all devices whose PM status is not
565  * DPM_ON (this allows new devices to be registered).
566  */
567 static void dpm_complete(pm_message_t state)
568 {
569         struct list_head list;
570
571         INIT_LIST_HEAD(&list);
572         mutex_lock(&dpm_list_mtx);
573         transition_started = false;
574         while (!list_empty(&dpm_list)) {
575                 struct device *dev = to_device(dpm_list.prev);
576
577                 get_device(dev);
578                 if (dev->power.status > DPM_ON) {
579                         dev->power.status = DPM_ON;
580                         mutex_unlock(&dpm_list_mtx);
581
582                         device_complete(dev, state);
583                         pm_runtime_put_noidle(dev);
584
585                         mutex_lock(&dpm_list_mtx);
586                 }
587                 if (!list_empty(&dev->power.entry))
588                         list_move(&dev->power.entry, &list);
589                 put_device(dev);
590         }
591         list_splice(&list, &dpm_list);
592         mutex_unlock(&dpm_list_mtx);
593 }
594
595 /**
596  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
597  * @state: PM transition of the system being carried out.
598  *
599  * Execute "resume" callbacks for all devices and complete the PM transition of
600  * the system.
601  */
602 void dpm_resume_end(pm_message_t state)
603 {
604         might_sleep();
605         dpm_resume(state);
606         dpm_complete(state);
607 }
608 EXPORT_SYMBOL_GPL(dpm_resume_end);
609
610
611 /*------------------------- Suspend routines -------------------------*/
612
613 /**
614  * resume_event - Return a "resume" message for given "suspend" sleep state.
615  * @sleep_state: PM message representing a sleep state.
616  *
617  * Return a PM message representing the resume event corresponding to given
618  * sleep state.
619  */
620 static pm_message_t resume_event(pm_message_t sleep_state)
621 {
622         switch (sleep_state.event) {
623         case PM_EVENT_SUSPEND:
624                 return PMSG_RESUME;
625         case PM_EVENT_FREEZE:
626         case PM_EVENT_QUIESCE:
627                 return PMSG_RECOVER;
628         case PM_EVENT_HIBERNATE:
629                 return PMSG_RESTORE;
630         }
631         return PMSG_ON;
632 }
633
634 /**
635  * device_suspend_noirq - Execute a "late suspend" callback for given device.
636  * @dev: Device to handle.
637  * @state: PM transition of the system being carried out.
638  *
639  * The driver of @dev will not receive interrupts while this function is being
640  * executed.
641  */
642 static int device_suspend_noirq(struct device *dev, pm_message_t state)
643 {
644         int error = 0;
645
646         if (!dev->bus)
647                 return 0;
648
649         if (dev->bus->pm) {
650                 pm_dev_dbg(dev, state, "LATE ");
651                 error = pm_noirq_op(dev, dev->bus->pm, state);
652         }
653         return error;
654 }
655
656 /**
657  * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
658  * @state: PM transition of the system being carried out.
659  *
660  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
661  * handlers for all non-sysdev devices.
662  */
663 int dpm_suspend_noirq(pm_message_t state)
664 {
665         struct device *dev;
666         int error = 0;
667
668         suspend_device_irqs();
669         mutex_lock(&dpm_list_mtx);
670         list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
671                 error = device_suspend_noirq(dev, state);
672                 if (error) {
673                         pm_dev_err(dev, state, " late", error);
674                         break;
675                 }
676                 dev->power.status = DPM_OFF_IRQ;
677         }
678         mutex_unlock(&dpm_list_mtx);
679         if (error)
680                 dpm_resume_noirq(resume_event(state));
681         return error;
682 }
683 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
684
685 /**
686  * device_suspend - Execute "suspend" callbacks for given device.
687  * @dev: Device to handle.
688  * @state: PM transition of the system being carried out.
689  */
690 static int device_suspend(struct device *dev, pm_message_t state)
691 {
692         int error = 0;
693
694         down(&dev->sem);
695
696         if (dev->class) {
697                 if (dev->class->pm) {
698                         pm_dev_dbg(dev, state, "class ");
699                         error = pm_op(dev, dev->class->pm, state);
700                 } else if (dev->class->suspend) {
701                         pm_dev_dbg(dev, state, "legacy class ");
702                         error = dev->class->suspend(dev, state);
703                         suspend_report_result(dev->class->suspend, error);
704                 }
705                 if (error)
706                         goto End;
707         }
708
709         if (dev->type) {
710                 if (dev->type->pm) {
711                         pm_dev_dbg(dev, state, "type ");
712                         error = pm_op(dev, dev->type->pm, state);
713                 }
714                 if (error)
715                         goto End;
716         }
717
718         if (dev->bus) {
719                 if (dev->bus->pm) {
720                         pm_dev_dbg(dev, state, "");
721                         error = pm_op(dev, dev->bus->pm, state);
722                 } else if (dev->bus->suspend) {
723                         pm_dev_dbg(dev, state, "legacy ");
724                         error = dev->bus->suspend(dev, state);
725                         suspend_report_result(dev->bus->suspend, error);
726                 }
727         }
728  End:
729         up(&dev->sem);
730
731         return error;
732 }
733
734 /**
735  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
736  * @state: PM transition of the system being carried out.
737  */
738 static int dpm_suspend(pm_message_t state)
739 {
740         struct list_head list;
741         int error = 0;
742
743         INIT_LIST_HEAD(&list);
744         mutex_lock(&dpm_list_mtx);
745         while (!list_empty(&dpm_list)) {
746                 struct device *dev = to_device(dpm_list.prev);
747
748                 get_device(dev);
749                 mutex_unlock(&dpm_list_mtx);
750
751                 dpm_drv_wdset(dev);
752                 error = device_suspend(dev, state);
753                 dpm_drv_wdclr(dev);
754
755                 mutex_lock(&dpm_list_mtx);
756                 if (error) {
757                         pm_dev_err(dev, state, "", error);
758                         put_device(dev);
759                         break;
760                 }
761                 dev->power.status = DPM_OFF;
762                 if (!list_empty(&dev->power.entry))
763                         list_move(&dev->power.entry, &list);
764                 put_device(dev);
765         }
766         list_splice(&list, dpm_list.prev);
767         mutex_unlock(&dpm_list_mtx);
768         return error;
769 }
770
771 /**
772  * device_prepare - Prepare a device for system power transition.
773  * @dev: Device to handle.
774  * @state: PM transition of the system being carried out.
775  *
776  * Execute the ->prepare() callback(s) for given device.  No new children of the
777  * device may be registered after this function has returned.
778  */
779 static int device_prepare(struct device *dev, pm_message_t state)
780 {
781         int error = 0;
782
783         down(&dev->sem);
784
785         if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
786                 pm_dev_dbg(dev, state, "preparing ");
787                 error = dev->bus->pm->prepare(dev);
788                 suspend_report_result(dev->bus->pm->prepare, error);
789                 if (error)
790                         goto End;
791         }
792
793         if (dev->type && dev->type->pm && dev->type->pm->prepare) {
794                 pm_dev_dbg(dev, state, "preparing type ");
795                 error = dev->type->pm->prepare(dev);
796                 suspend_report_result(dev->type->pm->prepare, error);
797                 if (error)
798                         goto End;
799         }
800
801         if (dev->class && dev->class->pm && dev->class->pm->prepare) {
802                 pm_dev_dbg(dev, state, "preparing class ");
803                 error = dev->class->pm->prepare(dev);
804                 suspend_report_result(dev->class->pm->prepare, error);
805         }
806  End:
807         up(&dev->sem);
808
809         return error;
810 }
811
812 /**
813  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
814  * @state: PM transition of the system being carried out.
815  *
816  * Execute the ->prepare() callback(s) for all devices.
817  */
818 static int dpm_prepare(pm_message_t state)
819 {
820         struct list_head list;
821         int error = 0;
822
823         INIT_LIST_HEAD(&list);
824         mutex_lock(&dpm_list_mtx);
825         transition_started = true;
826         while (!list_empty(&dpm_list)) {
827                 struct device *dev = to_device(dpm_list.next);
828
829                 get_device(dev);
830                 dev->power.status = DPM_PREPARING;
831                 mutex_unlock(&dpm_list_mtx);
832
833                 pm_runtime_get_noresume(dev);
834                 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
835                         /* Wake-up requested during system sleep transition. */
836                         pm_runtime_put_noidle(dev);
837                         error = -EBUSY;
838                 } else {
839                         error = device_prepare(dev, state);
840                 }
841
842                 mutex_lock(&dpm_list_mtx);
843                 if (error) {
844                         dev->power.status = DPM_ON;
845                         if (error == -EAGAIN) {
846                                 put_device(dev);
847                                 error = 0;
848                                 continue;
849                         }
850                         printk(KERN_ERR "PM: Failed to prepare device %s "
851                                 "for power transition: error %d\n",
852                                 kobject_name(&dev->kobj), error);
853                         put_device(dev);
854                         break;
855                 }
856                 dev->power.status = DPM_SUSPENDING;
857                 if (!list_empty(&dev->power.entry))
858                         list_move_tail(&dev->power.entry, &list);
859                 put_device(dev);
860         }
861         list_splice(&list, &dpm_list);
862         mutex_unlock(&dpm_list_mtx);
863         return error;
864 }
865
866 /**
867  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
868  * @state: PM transition of the system being carried out.
869  *
870  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
871  * callbacks for them.
872  */
873 int dpm_suspend_start(pm_message_t state)
874 {
875         int error;
876
877         might_sleep();
878         error = dpm_prepare(state);
879         if (!error)
880                 error = dpm_suspend(state);
881         return error;
882 }
883 EXPORT_SYMBOL_GPL(dpm_suspend_start);
884
885 void __suspend_report_result(const char *function, void *fn, int ret)
886 {
887         if (ret)
888                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
889 }
890 EXPORT_SYMBOL_GPL(__suspend_report_result);