Merge branch 'for-3.5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[firefly-linux-kernel-4.4.55.git] / drivers / base / power / domain.c
1 /*
2  * drivers/base/power/domain.c - Common code related to device power domains.
3  *
4  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5  *
6  * This file is released under the GPLv2.
7  */
8
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/io.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_qos.h>
15 #include <linux/slab.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/suspend.h>
19 #include <linux/export.h>
20
/*
 * Invoke the device operation @callback for @dev and evaluate to its result.
 * The domain-wide callback in genpd->dev_ops takes precedence; if it is not
 * set, fall back to the per-device callback in dev_gpd_data(dev)->ops.
 * Evaluates to (type)0 when neither callback is set.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        } else {                                                \
                __routine = dev_gpd_data(dev)->ops.callback;    \
                if (__routine)                                  \
                        __ret = __routine(dev);                 \
        }                                                       \
        __ret;                                                  \
})
36
/*
 * Like GENPD_DEV_CALLBACK(), but also measure the callback's execution time.
 * On success, if the measured time exceeds the worst case recorded in the
 * device's timing data member @field, record the new maximum, warn about it
 * (using the string @name), and flag both the device's constraints and the
 * domain's max_off_time as changed so the governor re-evaluates them.
 */
#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_warn(dev, name " latency exceeded, new value %lld ns\n",    \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
52
/* List of all registered generic PM domains, protected by gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
55
56 #ifdef CONFIG_PM
57
58 struct generic_pm_domain *dev_to_genpd(struct device *dev)
59 {
60         if (IS_ERR_OR_NULL(dev->pm_domain))
61                 return ERR_PTR(-EINVAL);
62
63         return pd_to_genpd(dev->pm_domain);
64 }
65
/* Run the "stop" callback for @dev, tracking its worst-case stop latency. */
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

/* Run the "start" callback for @dev, tracking its worst-case start latency. */
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

/* Run the "save_state" callback for @dev, tracking its state-save latency. */
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

/* Run the "restore_state" callback for @dev, tracking its restore latency. */
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}
90
91 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
92 {
93         bool ret = false;
94
95         if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
96                 ret = !!atomic_dec_and_test(&genpd->sd_count);
97
98         return ret;
99 }
100
/*
 * Increment the domain's powered-on subdomain counter.  The barrier makes
 * the increment visible before any subsequent accesses by this CPU, for the
 * benefit of the lockless atomic_read() checks in the power-off paths.
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic_inc();
}
106
/*
 * Acquire genpd->lock and wait until the domain has settled in either the
 * "active" or the "power off" state (i.e. no transition is in progress).
 * The lock is dropped over each sleep and reacquired before rechecking;
 * the function returns with the lock held.
 */
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}
130
/* Counterpart of genpd_acquire_lock(). */
static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}
135
136 static void genpd_set_active(struct generic_pm_domain *genpd)
137 {
138         if (genpd->resume_count == 0)
139                 genpd->status = GPD_STATE_ACTIVE;
140 }
141
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 *
 * Must be called with genpd->lock held.  The lock is released and reacquired
 * while waiting for masters and while powering them on, as indicated by the
 * sparse annotations below.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        /* Nothing to do if already active or suspend left the power on. */
        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        /* Power is on but the status is transitional - just mark it active. */
        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for parent" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        if (genpd->power_on) {
                ktime_t time_start = ktime_get();
                s64 elapsed_ns;

                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                /* Track the worst-case power-on latency for the governor. */
                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_on_latency_ns) {
                        genpd->power_on_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-on latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd_set_active(genpd);

        return 0;

 err:
        /* Undo the sd_count increments done for masters already processed. */
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}
235
236 /**
237  * pm_genpd_poweron - Restore power to a given PM domain and its masters.
238  * @genpd: PM domain to power up.
239  */
240 int pm_genpd_poweron(struct generic_pm_domain *genpd)
241 {
242         int ret;
243
244         mutex_lock(&genpd->lock);
245         ret = __pm_genpd_poweron(genpd);
246         mutex_unlock(&genpd->lock);
247         return ret;
248 }
249
250 #endif /* CONFIG_PM */
251
252 #ifdef CONFIG_PM_RUNTIME
253
/*
 * genpd_dev_pm_qos_notifier - React to PM QoS constraint changes of a device.
 *
 * Mark the timing data of the device whose constraints changed (and of each
 * of its ancestors, stopping at one with power.ignore_children set) as
 * changed, and flag the affected domains' max_off_time as stale so that the
 * governor recomputes it.
 */
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        /* gpd_data->lock guards the base.dev pointer against teardown. */
        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        /* Walk up the parent chain, flagging each device's domain. */
        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}
300
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Must be called with genpd->lock held; the lock is dropped while the device
 * callbacks run and reacquired afterwards.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        /* State already saved and not yet restored - nothing to do. */
        if (gpd_data->need_restore)
                return 0;

        mutex_unlock(&genpd->lock);

        /* The device must be started while its state is being saved. */
        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        /* Flag a pending restore only if the save actually succeeded. */
        if (!ret)
                gpd_data->need_restore = true;

        return ret;
}
330
/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 *
 * Must be called with genpd->lock held; the lock is dropped while the device
 * callbacks run and reacquired afterwards.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;

        /* Nothing to do unless a state save is pending restoration. */
        if (!gpd_data->need_restore)
                return;

        mutex_unlock(&genpd->lock);

        /* The device must be started while its state is being restored. */
        genpd_start_dev(genpd, dev);
        genpd_restore_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        gpd_data->need_restore = false;
}
356
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 *
 * Must be called with genpd->lock held.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}
371
372 /**
373  * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
374  * @genpd: PM domait to power off.
375  *
376  * Queue up the execution of pm_genpd_poweroff() unless it's already been done
377  * before.
378  */
379 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
380 {
381         if (!work_pending(&genpd->power_off_work))
382                 queue_work(pm_wq, &genpd->power_off_work);
383 }
384
/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 *
 * Must be called with genpd->lock held.  The lock is dropped and reacquired
 * around the device callbacks (see __pm_genpd_save_device()); if another task
 * powers the domain up (or queues a device resume) in that window, the
 * operation restarts or aborts via genpd_abort_poweroff()/GPD_STATE_REPEAT.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        /* The domain cannot go down while it has powered-on subdomains. */
        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        /*
         * Count devices not ready for the domain to go down: bound to a
         * driver and either not runtime-suspended, IRQ-safe, or "always on".
         */
        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
                        not_suspended++;

        /* in_progress covers devices suspending right now (our callers). */
        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        /* Let the governor veto the power-off on latency grounds. */
        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        /* Save device states; the lock is dropped inside the helper. */
        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                /* Another task asked us to restart while we were unlocked. */
                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->power_off) {
                ktime_t time_start;
                s64 elapsed_ns;

                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                time_start = ktime_get();

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }

                /* Track the worst-case power-off latency for the governor. */
                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_off_latency_ns) {
                        genpd->power_off_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-off latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        /* Tell each master it lost a powered-on subdomain; maybe power it off. */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}
509
510 /**
511  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
512  * @work: Work structure used for scheduling the execution of this function.
513  */
514 static void genpd_power_off_work_fn(struct work_struct *work)
515 {
516         struct generic_pm_domain *genpd;
517
518         genpd = container_of(work, struct generic_pm_domain, power_off_work);
519
520         genpd_acquire_lock(genpd);
521         pm_genpd_poweroff(genpd);
522         genpd_release_lock(genpd);
523 }
524
/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        /* "Always on" devices must never be stopped. */
        if (dev_gpd_data(dev)->always_on)
                return -EBUSY;

        /* Let the governor veto the stop on latency grounds. */
        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        /*
         * in_progress tells pm_genpd_poweroff() to count this device as
         * suspended even though the PM core has not marked it so yet.
         */
        mutex_lock(&genpd->lock);
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}
573
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                goto out;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        /*
         * Mark a resume in progress so that a concurrent pm_genpd_poweroff()
         * aborts (see genpd_abort_poweroff()), then wait for any power-off
         * in flight to finish before touching the device.
         */
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

 out:
        /* NOTE(review): genpd_start_dev()'s return value is discarded here. */
        genpd_start_dev(genpd, dev);

        return 0;
}
636
637 /**
638  * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
639  */
640 void pm_genpd_poweroff_unused(void)
641 {
642         struct generic_pm_domain *genpd;
643
644         mutex_lock(&gpd_list_lock);
645
646         list_for_each_entry(genpd, &gpd_list, gpd_list_node)
647                 genpd_queue_power_off_work(genpd);
648
649         mutex_unlock(&gpd_list_lock);
650 }
651
652 #else
653
/* Stubs used when CONFIG_PM_RUNTIME is not set. */
static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL
664
665 #endif /* CONFIG_PM_RUNTIME */
666
667 #ifdef CONFIG_PM_SLEEP
668
/* Ask the domain/device whether @dev should stay active for wakeup. */
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/* Invoke the "suspend" callback for @dev. */
static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

/* Invoke the "suspend_late" callback for @dev. */
static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

/* Invoke the "resume_early" callback for @dev. */
static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

/* Invoke the "resume" callback for @dev. */
static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

/* Invoke the "freeze" callback for @dev. */
static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

/* Invoke the "freeze_late" callback for @dev. */
static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

/* Invoke the "thaw_early" callback for @dev. */
static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

/* Invoke the "thaw" callback for @dev. */
static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}
714
/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
 * sequentially, so it is guaranteed that it will never run twice in parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        /* All devices must be suspended and no subdomains powered on. */
        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        /* Recurse into each master, which may now be able to go down too. */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}
747
748 /**
749  * resume_needed - Check whether to resume a device before system suspend.
750  * @dev: Device to check.
751  * @genpd: PM domain the device belongs to.
752  *
753  * There are two cases in which a device that can wake up the system from sleep
754  * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
755  * to wake up the system and it has to remain active for this purpose while the
756  * system is in the sleep state and (2) if the device is not enabled to wake up
757  * the system from sleep states and it generally doesn't generate wakeup signals
758  * by itself (those signals are generated on its behalf by other parts of the
759  * system).  In the latter case it may be necessary to reconfigure the device's
760  * wakeup settings during system suspend, because it may have been set up to
761  * signal remote wakeup from the system's working state as needed by runtime PM.
762  * Return 'true' in either of the above cases.
763  */
764 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
765 {
766         bool active_wakeup;
767
768         if (!device_can_wakeup(dev))
769                 return false;
770
771         active_wakeup = genpd_dev_active_wakeup(genpd, dev);
772         return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
773 }
774
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        /*
         * The first device prepared in the domain records whether the domain
         * was off at that point; that decision holds for the whole transition.
         */
        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        /* NOTE(review): suspend_power_off is read outside genpd->lock here. */
        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                /* Prepare failed - undo the prepared_count bump above. */
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        /* Balances the pm_runtime_get_noresume() at the top. */
        pm_runtime_put_sync(dev);
        return ret;
}
849
850 /**
851  * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
852  * @dev: Device to suspend.
853  *
854  * Suspend a device under the assumption that its pm_domain field points to the
855  * domain member of an object of type struct generic_pm_domain representing
856  * a PM domain consisting of I/O devices.
857  */
858 static int pm_genpd_suspend(struct device *dev)
859 {
860         struct generic_pm_domain *genpd;
861
862         dev_dbg(dev, "%s()\n", __func__);
863
864         genpd = dev_to_genpd(dev);
865         if (IS_ERR(genpd))
866                 return -EINVAL;
867
868         return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
869 }
870
871 /**
872  * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
873  * @dev: Device to suspend.
874  *
875  * Carry out a late suspend of a device under the assumption that its
876  * pm_domain field points to the domain member of an object of type
877  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
878  */
879 static int pm_genpd_suspend_late(struct device *dev)
880 {
881         struct generic_pm_domain *genpd;
882
883         dev_dbg(dev, "%s()\n", __func__);
884
885         genpd = dev_to_genpd(dev);
886         if (IS_ERR(genpd))
887                 return -EINVAL;
888
889         return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
890 }
891
892 /**
893  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
894  * @dev: Device to suspend.
895  *
896  * Stop the device and remove power from the domain if all devices in it have
897  * been stopped.
898  */
899 static int pm_genpd_suspend_noirq(struct device *dev)
900 {
901         struct generic_pm_domain *genpd;
902
903         dev_dbg(dev, "%s()\n", __func__);
904
905         genpd = dev_to_genpd(dev);
906         if (IS_ERR(genpd))
907                 return -EINVAL;
908
909         if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
910             || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
911                 return 0;
912
913         genpd_stop_dev(genpd, dev);
914
915         /*
916          * Since all of the "noirq" callbacks are executed sequentially, it is
917          * guaranteed that this function will never run twice in parallel for
918          * the same PM domain, so it is not necessary to use locking here.
919          */
920         genpd->suspended_count++;
921         pm_genpd_sync_poweroff(genpd);
922
923         return 0;
924 }
925
926 /**
927  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
928  * @dev: Device to resume.
929  *
930  * Restore power to the device's PM domain, if necessary, and start the device.
931  */
932 static int pm_genpd_resume_noirq(struct device *dev)
933 {
934         struct generic_pm_domain *genpd;
935
936         dev_dbg(dev, "%s()\n", __func__);
937
938         genpd = dev_to_genpd(dev);
939         if (IS_ERR(genpd))
940                 return -EINVAL;
941
942         if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
943             || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
944                 return 0;
945
946         /*
947          * Since all of the "noirq" callbacks are executed sequentially, it is
948          * guaranteed that this function will never run twice in parallel for
949          * the same PM domain, so it is not necessary to use locking here.
950          */
951         pm_genpd_poweron(genpd);
952         genpd->suspended_count--;
953
954         return genpd_start_dev(genpd, dev);
955 }
956
957 /**
958  * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
959  * @dev: Device to resume.
960  *
961  * Carry out an early resume of a device under the assumption that its
962  * pm_domain field points to the domain member of an object of type
963  * struct generic_pm_domain representing a power domain consisting of I/O
964  * devices.
965  */
966 static int pm_genpd_resume_early(struct device *dev)
967 {
968         struct generic_pm_domain *genpd;
969
970         dev_dbg(dev, "%s()\n", __func__);
971
972         genpd = dev_to_genpd(dev);
973         if (IS_ERR(genpd))
974                 return -EINVAL;
975
976         return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
977 }
978
979 /**
980  * pm_genpd_resume - Resume of device in an I/O PM domain.
981  * @dev: Device to resume.
982  *
983  * Resume a device under the assumption that its pm_domain field points to the
984  * domain member of an object of type struct generic_pm_domain representing
985  * a power domain consisting of I/O devices.
986  */
987 static int pm_genpd_resume(struct device *dev)
988 {
989         struct generic_pm_domain *genpd;
990
991         dev_dbg(dev, "%s()\n", __func__);
992
993         genpd = dev_to_genpd(dev);
994         if (IS_ERR(genpd))
995                 return -EINVAL;
996
997         return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
998 }
999
1000 /**
1001  * pm_genpd_freeze - Freezing a device in an I/O PM domain.
1002  * @dev: Device to freeze.
1003  *
1004  * Freeze a device under the assumption that its pm_domain field points to the
1005  * domain member of an object of type struct generic_pm_domain representing
1006  * a power domain consisting of I/O devices.
1007  */
1008 static int pm_genpd_freeze(struct device *dev)
1009 {
1010         struct generic_pm_domain *genpd;
1011
1012         dev_dbg(dev, "%s()\n", __func__);
1013
1014         genpd = dev_to_genpd(dev);
1015         if (IS_ERR(genpd))
1016                 return -EINVAL;
1017
1018         return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
1019 }
1020
1021 /**
1022  * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
1023  * @dev: Device to freeze.
1024  *
1025  * Carry out a late freeze of a device under the assumption that its
1026  * pm_domain field points to the domain member of an object of type
1027  * struct generic_pm_domain representing a power domain consisting of I/O
1028  * devices.
1029  */
1030 static int pm_genpd_freeze_late(struct device *dev)
1031 {
1032         struct generic_pm_domain *genpd;
1033
1034         dev_dbg(dev, "%s()\n", __func__);
1035
1036         genpd = dev_to_genpd(dev);
1037         if (IS_ERR(genpd))
1038                 return -EINVAL;
1039
1040         return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
1041 }
1042
/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Stop the device under the assumption that its pm_domain field points to
 * the domain member of an object of type struct generic_pm_domain
 * representing a power domain consisting of I/O devices.  Unlike
 * pm_genpd_suspend_noirq(), this does not attempt to power off the domain.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Nothing to do if the domain is off or the device must stay active. */
	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
		0 : genpd_stop_dev(genpd, dev);
}
1065
1066 /**
1067  * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1068  * @dev: Device to thaw.
1069  *
1070  * Start the device, unless power has been removed from the domain already
1071  * before the system transition.
1072  */
1073 static int pm_genpd_thaw_noirq(struct device *dev)
1074 {
1075         struct generic_pm_domain *genpd;
1076
1077         dev_dbg(dev, "%s()\n", __func__);
1078
1079         genpd = dev_to_genpd(dev);
1080         if (IS_ERR(genpd))
1081                 return -EINVAL;
1082
1083         return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
1084                 0 : genpd_start_dev(genpd, dev);
1085 }
1086
1087 /**
1088  * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
1089  * @dev: Device to thaw.
1090  *
1091  * Carry out an early thaw of a device under the assumption that its
1092  * pm_domain field points to the domain member of an object of type
1093  * struct generic_pm_domain representing a power domain consisting of I/O
1094  * devices.
1095  */
1096 static int pm_genpd_thaw_early(struct device *dev)
1097 {
1098         struct generic_pm_domain *genpd;
1099
1100         dev_dbg(dev, "%s()\n", __func__);
1101
1102         genpd = dev_to_genpd(dev);
1103         if (IS_ERR(genpd))
1104                 return -EINVAL;
1105
1106         return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
1107 }
1108
1109 /**
1110  * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
1111  * @dev: Device to thaw.
1112  *
1113  * Thaw a device under the assumption that its pm_domain field points to the
1114  * domain member of an object of type struct generic_pm_domain representing
1115  * a power domain consisting of I/O devices.
1116  */
1117 static int pm_genpd_thaw(struct device *dev)
1118 {
1119         struct generic_pm_domain *genpd;
1120
1121         dev_dbg(dev, "%s()\n", __func__);
1122
1123         genpd = dev_to_genpd(dev);
1124         if (IS_ERR(genpd))
1125                 return -EINVAL;
1126
1127         return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
1128 }
1129
/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_poweron(), so
		 * that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	/* Later devices of a domain that stayed off: nothing to do. */
	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_poweron(genpd);

	/* "Always on" devices were never stopped, so don't restart them. */
	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}
1181
/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	/*
	 * Only run the completion work if the domain was not powered off
	 * throughout the system transition.
	 */
	run_complete = !genpd->suspend_power_off;
	/* The last device to complete clears the domain-wide flag. */
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		/* Re-enable the runtime PM that the prepare phase disabled. */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}
1217
#else

/*
 * !CONFIG_PM_SLEEP: no system sleep support, so all of the system-wide PM
 * callbacks installed by pm_genpd_init() collapse to NULL.
 */
#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */
1237
/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device (optional).
 *
 * Returns 0 on success, -EINVAL on bad arguments or if @dev already belongs
 * to @genpd, -EAGAIN while a system transition is in progress and -ENOMEM on
 * allocation failure.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	/* Allocate and register the QoS notifier before taking the domain lock. */
	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return -ENOMEM;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	genpd_acquire_lock(genpd);

	/* No membership changes while a system transition is in progress. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Reject devices that already belong to this domain. */
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	/*
	 * NOTE(review): the return value is ignored here; if this can fail,
	 * the subsys_data dereference below would oops -- verify.
	 */
	dev_pm_get_subsys_data(dev);

	/* Lock order: gpd_data->lock first, then dev->power.lock. */
	mutex_lock(&gpd_data->lock);
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = &genpd->domain;
	dev->power.subsys_data->domain_data = &gpd_data->base;
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	/* A powered-off domain means the device state must be restored first. */
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	if (td)
		gpd_data->td = *td;

	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	spin_unlock_irq(&dev->power.lock);
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	return 0;

 out:
	genpd_release_lock(genpd);

	/* Undo the notifier registration and free the unused data on failure. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
	return ret;
}
1308
1309 /**
1310  * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1311  * @genpd_node: Device tree node pointer representing a PM domain to which the
1312  *   the device is added to.
1313  * @dev: Device to be added.
1314  * @td: Set of PM QoS timing parameters to attach to the device.
1315  */
1316 int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1317                              struct gpd_timing_data *td)
1318 {
1319         struct generic_pm_domain *genpd = NULL, *gpd;
1320
1321         dev_dbg(dev, "%s()\n", __func__);
1322
1323         if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1324                 return -EINVAL;
1325
1326         mutex_lock(&gpd_list_lock);
1327         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1328                 if (gpd->of_node == genpd_node) {
1329                         genpd = gpd;
1330                         break;
1331                 }
1332         }
1333         mutex_unlock(&gpd_list_lock);
1334
1335         if (!genpd)
1336                 return -EINVAL;
1337
1338         return __pm_genpd_add_device(genpd, dev, td);
1339 }
1340
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 *
 * Returns 0 on success, -EINVAL if @dev is not a member of @genpd and
 * -EAGAIN while a system transition is in progress.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	/* No membership changes while a system transition is in progress. */
	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	/* Detach the device from the domain under its power lock. */
	spin_lock_irq(&dev->power.lock);
	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	dev->power.subsys_data->domain_data = NULL;
	spin_unlock_irq(&dev->power.lock);

	/*
	 * Clear the back-pointer under gpd_data->lock -- presumably to
	 * synchronize with concurrent users of gpd_data such as the QoS
	 * notifier; verify against genpd_dev_pm_qos_notifier().
	 */
	gpd_data = to_gpd_data(pdd);
	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	/* Tear down the notifier and release the data added by add_device. */
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}
1394
1395 /**
1396  * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1397  * @dev: Device to set/unset the flag for.
1398  * @val: The new value of the device's "always on" flag.
1399  */
1400 void pm_genpd_dev_always_on(struct device *dev, bool val)
1401 {
1402         struct pm_subsys_data *psd;
1403         unsigned long flags;
1404
1405         spin_lock_irqsave(&dev->power.lock, flags);
1406
1407         psd = dev_to_psd(dev);
1408         if (psd && psd->domain_data)
1409                 to_gpd_data(psd->domain_data)->always_on = val;
1410
1411         spin_unlock_irqrestore(&dev->power.lock, flags);
1412 }
1413 EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1414
1415 /**
1416  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1417  * @dev: Device to set/unset the flag for.
1418  * @val: The new value of the device's "need restore" flag.
1419  */
1420 void pm_genpd_dev_need_restore(struct device *dev, bool val)
1421 {
1422         struct pm_subsys_data *psd;
1423         unsigned long flags;
1424
1425         spin_lock_irqsave(&dev->power.lock, flags);
1426
1427         psd = dev_to_psd(dev);
1428         if (psd && psd->domain_data)
1429                 to_gpd_data(psd->domain_data)->need_restore = val;
1430
1431         spin_unlock_irqrestore(&dev->power.lock, flags);
1432 }
1433 EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1434
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 *
 * Returns 0 on success, -EINVAL on bad arguments, inconsistent power states
 * or an already existing link, and -ENOMEM on allocation failure.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	/* Lock order: master first, then subdomain (nested lock class). */
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	/*
	 * If the subdomain is mid-transition, drop both locks and retry
	 * until it settles in either the off or the active state.
	 */
	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	/* A powered-on subdomain cannot hang off a powered-off master. */
	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Refuse to create the same master/slave link twice. */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	/* A non-off subdomain keeps the master's subdomain count elevated. */
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
1491
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 *
 * Returns 0 on success and -EINVAL on bad arguments or if no link between
 * @genpd and @subdomain exists.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		/* Lock order: master first, then subdomain (nested class). */
		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		/*
		 * If the subdomain is mid-transition, drop both locks and
		 * retry until it settles in the off or the active state.
		 */
		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		/* Balance the counter bump done in pm_genpd_add_subdomain(). */
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}
1538
1539 /**
1540  * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
1541  * @dev: Device to add the callbacks to.
1542  * @ops: Set of callbacks to add.
1543  * @td: Timing data to add to the device along with the callbacks (optional).
1544  */
1545 int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
1546                            struct gpd_timing_data *td)
1547 {
1548         struct pm_domain_data *pdd;
1549         int ret = 0;
1550
1551         if (!(dev && dev->power.subsys_data && ops))
1552                 return -EINVAL;
1553
1554         pm_runtime_disable(dev);
1555         device_pm_lock();
1556
1557         pdd = dev->power.subsys_data->domain_data;
1558         if (pdd) {
1559                 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1560
1561                 gpd_data->ops = *ops;
1562                 if (td)
1563                         gpd_data->td = *td;
1564         } else {
1565                 ret = -EINVAL;
1566         }
1567
1568         device_pm_unlock();
1569         pm_runtime_enable(dev);
1570
1571         return ret;
1572 }
1573 EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
1574
1575 /**
1576  * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
1577  * @dev: Device to remove the callbacks from.
1578  * @clear_td: If set, clear the device's timing data too.
1579  */
1580 int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
1581 {
1582         struct pm_domain_data *pdd;
1583         int ret = 0;
1584
1585         if (!(dev && dev->power.subsys_data))
1586                 return -EINVAL;
1587
1588         pm_runtime_disable(dev);
1589         device_pm_lock();
1590
1591         pdd = dev->power.subsys_data->domain_data;
1592         if (pdd) {
1593                 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
1594
1595                 gpd_data->ops = (struct gpd_dev_ops){ 0 };
1596                 if (clear_td)
1597                         gpd_data->td = (struct gpd_timing_data){ 0 };
1598         } else {
1599                 ret = -EINVAL;
1600         }
1601
1602         device_pm_unlock();
1603         pm_runtime_enable(dev);
1604
1605         return ret;
1606 }
1607 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
1608
1609 /* Default device callbacks for generic PM domains. */
1610
1611 /**
1612  * pm_genpd_default_save_state - Default "save device state" for PM domians.
1613  * @dev: Device to handle.
1614  */
1615 static int pm_genpd_default_save_state(struct device *dev)
1616 {
1617         int (*cb)(struct device *__dev);
1618         struct device_driver *drv = dev->driver;
1619
1620         cb = dev_gpd_data(dev)->ops.save_state;
1621         if (cb)
1622                 return cb(dev);
1623
1624         if (drv && drv->pm && drv->pm->runtime_suspend)
1625                 return drv->pm->runtime_suspend(dev);
1626
1627         return 0;
1628 }
1629
1630 /**
1631  * pm_genpd_default_restore_state - Default PM domians "restore device state".
1632  * @dev: Device to handle.
1633  */
1634 static int pm_genpd_default_restore_state(struct device *dev)
1635 {
1636         int (*cb)(struct device *__dev);
1637         struct device_driver *drv = dev->driver;
1638
1639         cb = dev_gpd_data(dev)->ops.restore_state;
1640         if (cb)
1641                 return cb(dev);
1642
1643         if (drv && drv->pm && drv->pm->runtime_resume)
1644                 return drv->pm->runtime_resume(dev);
1645
1646         return 0;
1647 }
1648
1649 #ifdef CONFIG_PM_SLEEP
1650
1651 /**
1652  * pm_genpd_default_suspend - Default "device suspend" for PM domians.
1653  * @dev: Device to handle.
1654  */
1655 static int pm_genpd_default_suspend(struct device *dev)
1656 {
1657         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
1658
1659         return cb ? cb(dev) : pm_generic_suspend(dev);
1660 }
1661
1662 /**
1663  * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
1664  * @dev: Device to handle.
1665  */
1666 static int pm_genpd_default_suspend_late(struct device *dev)
1667 {
1668         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1669
1670         return cb ? cb(dev) : pm_generic_suspend_late(dev);
1671 }
1672
1673 /**
1674  * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
1675  * @dev: Device to handle.
1676  */
1677 static int pm_genpd_default_resume_early(struct device *dev)
1678 {
1679         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1680
1681         return cb ? cb(dev) : pm_generic_resume_early(dev);
1682 }
1683
1684 /**
1685  * pm_genpd_default_resume - Default "device resume" for PM domians.
1686  * @dev: Device to handle.
1687  */
1688 static int pm_genpd_default_resume(struct device *dev)
1689 {
1690         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
1691
1692         return cb ? cb(dev) : pm_generic_resume(dev);
1693 }
1694
1695 /**
1696  * pm_genpd_default_freeze - Default "device freeze" for PM domians.
1697  * @dev: Device to handle.
1698  */
1699 static int pm_genpd_default_freeze(struct device *dev)
1700 {
1701         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
1702
1703         return cb ? cb(dev) : pm_generic_freeze(dev);
1704 }
1705
1706 /**
1707  * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
1708  * @dev: Device to handle.
1709  */
1710 static int pm_genpd_default_freeze_late(struct device *dev)
1711 {
1712         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1713
1714         return cb ? cb(dev) : pm_generic_freeze_late(dev);
1715 }
1716
1717 /**
1718  * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
1719  * @dev: Device to handle.
1720  */
1721 static int pm_genpd_default_thaw_early(struct device *dev)
1722 {
1723         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1724
1725         return cb ? cb(dev) : pm_generic_thaw_early(dev);
1726 }
1727
1728 /**
1729  * pm_genpd_default_thaw - Default "device thaw" for PM domians.
1730  * @dev: Device to handle.
1731  */
1732 static int pm_genpd_default_thaw(struct device *dev)
1733 {
1734         int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
1735
1736         return cb ? cb(dev) : pm_generic_thaw(dev);
1737 }
1738
#else /* !CONFIG_PM_SLEEP */

/* Without system sleep support the default sleep callbacks are unused. */
#define pm_genpd_default_suspend        NULL
#define pm_genpd_default_suspend_late   NULL
#define pm_genpd_default_resume_early   NULL
#define pm_genpd_default_resume         NULL
#define pm_genpd_default_freeze         NULL
#define pm_genpd_default_freeze_late    NULL
#define pm_genpd_default_thaw_early     NULL
#define pm_genpd_default_thaw           NULL

#endif /* !CONFIG_PM_SLEEP */
1751
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Sets up the domain's lists, locks, counters and all of its PM callbacks,
 * then adds it to the global list of PM domains.  Silently ignores a NULL
 * or error-valued @genpd.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	/* Runtime PM callbacks. */
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	/* System sleep callbacks (all NULL when !CONFIG_PM_SLEEP). */
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	/* The hibernation poweroff phase reuses the suspend callbacks. */
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	/* Default per-device callbacks; see pm_genpd_add_callbacks(). */
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	/* Make the domain visible to __pm_genpd_of_add_device() lookups. */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}