/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <linux/rtc.h>
#include <trace/events/power.h>
#include <linux/wakeup_reason.h>

#include "power.h"

struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
	[PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
	[PM_SUSPEND_STANDBY] = { .label = "standby", },
	[PM_SUSPEND_MEM] = { .label = "mem", },
};

static const struct platform_suspend_ops *suspend_ops;

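/*
 * need_suspend_ops - Tell whether platform suspend_ops are required.
 * @state: Target system sleep state.
 *
 * The freeze state works with frozen processes and suspended devices alone;
 * standby and suspend-to-RAM additionally require platform suspend_ops.
 */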
static bool need_suspend_ops(suspend_state_t state)
{
	return !!(state > PM_SUSPEND_FREEZE);
}

static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static bool suspend_freeze_wake;

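/*
 * In the freeze state the system idles on suspend_freeze_wait_head with
 * processes frozen and devices suspended; freeze_wake() is called from the
 * wakeup path to set suspend_freeze_wake and let freeze_enter() return.
 */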
static void freeze_begin(void)
{
	suspend_freeze_wake = false;
}

static void freeze_enter(void)
{
	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
}

void freeze_wake(void)
{
	suspend_freeze_wake = true;
	wake_up(&suspend_freeze_wait_head);
}
EXPORT_SYMBOL_GPL(freeze_wake);

static bool valid_state(suspend_state_t state)
{
	/*
	 * The PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states require low-level
	 * platform support and must be recognized as valid by the low-level
	 * implementation; no ->valid() callback implies that neither is valid.
	 */
	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	suspend_state_t i;

	lock_system_sleep();

	suspend_ops = ops;
	for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++)
		pm_states[i].state = valid_state(i) ? i : 0;

	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

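/*
 * Illustrative sketch (hypothetical platform code, not part of this file):
 * a platform supporting only suspend-to-RAM would typically register
 * something like
 *
 *	static const struct platform_suspend_ops foo_suspend_ops = {
 *		.valid = suspend_valid_only_mem,
 *		.enter = foo_suspend_enter,
 *	};
 *
 *	suspend_set_ops(&foo_suspend_ops);
 *
 * from its machine init code, where foo_suspend_enter() is the platform's
 * low-level entry point into the sleep state.
 */
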
/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);

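/*
 * suspend_test - Pause at the requested point of the suspend sequence.
 * @level: Test level currently being processed.
 *
 * With CONFIG_PM_DEBUG set and a matching pm_test level selected, wait for
 * five seconds at this point and report the test as hit so the caller can
 * back out instead of suspending further.
 */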
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
		mdelay(5000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}

/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation).  Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
		return -EPERM;

	pm_prepare_console();

	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
	if (error)
		goto Finish;

	error = suspend_freeze_processes();
	if (!error)
		return 0;
	log_suspend_abort_reason("One or more tasks refusing to freeze");
	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}

/* default implementation */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	int error, last_dev;

	if (need_suspend_ops(state) && suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			goto Platform_finish;
	}

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error) {
		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
		last_dev %= REC_FAILED_NUM;
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		log_suspend_abort_reason("%s device failed to power down",
			suspend_stats.failed_devs[last_dev]);
		goto Platform_finish;
	}

	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Platform_wake;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		freeze_enter();
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS)) {
		log_suspend_abort_reason("Disabling non-boot cpus failed");
		goto Enable_cpus;
	}

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			error = suspend_ops->enter(state);
			events_check_enabled = false;
		} else {
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
			log_suspend_abort_reason(suspend_abort);
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (need_suspend_ops(state) && suspend_ops->wake)
		suspend_ops->wake();

	dpm_resume_start(PMSG_RESUME);

 Platform_finish:
	if (need_suspend_ops(state) && suspend_ops->finish)
		suspend_ops->finish();

	return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (need_suspend_ops(state) && !suspend_ops)
		return -ENOSYS;

	trace_machine_suspend(state);
	if (need_suspend_ops(state) && suspend_ops->begin) {
		error = suspend_ops->begin(state);
		if (error)
			goto Close;
	}
	suspend_console();
	ftrace_stop();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to suspend\n");
		log_suspend_abort_reason("Some devices failed to suspend");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && need_suspend_ops(state)
		&& suspend_ops->suspend_again && suspend_ops->suspend_again());

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	ftrace_start();
	resume_console();
 Close:
	if (need_suspend_ops(state) && suspend_ops->end)
		suspend_ops->end();
	trace_machine_suspend(PWR_EVENT_EXIT);
	return error;

 Recover_platform:
	if (need_suspend_ops(state) && suspend_ops->recover)
		suspend_ops->recover();
	goto Resume_devices;
}

/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warning("PM: Unsupported test mode for freeze state,"
				   " please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}

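/*
 * pm_suspend_marker - Log a timestamped marker around suspend entry/exit.
 * @annotation: "entry" or "exit", emitted in the message.
 *
 * Prints the current wall-clock time (UTC) so suspend entry and exit can be
 * correlated with other timestamped logs.
 */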
static void pm_suspend_marker(char *annotation)
{
	struct timespec ts;
	struct rtc_time tm;

	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
		annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
	int error;

	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
		return -EINVAL;

	pm_suspend_marker("entry");
	error = enter_state(state);
	if (error) {
		suspend_stats.fail++;
		dpm_save_failed_errno(error);
	} else {
		suspend_stats.success++;
	}
	pm_suspend_marker("exit");
	return error;
}
EXPORT_SYMBOL(pm_suspend);