/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:  Nicolas Pitre, October 2012
 * Copyright:   (C) 2012  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:   (C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/psci.h>

#include <mach/motherboard.h>
#include <mach/tc2.h>

#include <linux/vexpress.h>
#include <linux/arm-cci.h>

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

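/*
 * Reference counts, indexed [cpu][cluster].  The only legal values are
 * 0 (CPU down), 1 (CPU up) and 2 (CPU requested to come up before it
 * had a chance to go down); see the checks in tc2_pm_power_up() and
 * tc2_pm_down() below.
 */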
static int tc2_pm_use_count[TC2_MAX_CPUS][TC2_MAX_CLUSTERS];

static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cluster >= TC2_MAX_CLUSTERS ||
            cpu >= vexpress_spc_get_nb_cpus(cluster))
                return -EINVAL;

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&tc2_pm_lock);

        if (!tc2_pm_use_count[0][cluster] &&
            !tc2_pm_use_count[1][cluster] &&
            !tc2_pm_use_count[2][cluster])
                vexpress_spc_powerdown_enable(cluster, 0);

        tc2_pm_use_count[cpu][cluster]++;
        if (tc2_pm_use_count[cpu][cluster] == 1) {
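                /*
                 * First reference: tell the SPC where this CPU should
                 * jump on wake-up (the common MCPM entry point) and
                 * arm its wake-up IRQ at the power controller.
                 */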
                vexpress_spc_write_resume_reg(cluster, cpu,
                                              virt_to_phys(mcpm_entry_point));
                vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
        } else if (tc2_pm_use_count[cpu][cluster] != 2) {
                /*
                 * The only possible values are:
                 * 0 = CPU down
                 * 1 = CPU (still) up
                 * 2 = CPU requested to be up before it had a chance
                 *     to actually go down.
                 * Any other value is a bug.
                 */
                BUG();
        }

        arch_spin_unlock(&tc2_pm_lock);
        local_irq_enable();

        return 0;
}

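/*
 * Common power-down path for both power_down() and suspend(): drop this
 * CPU's reference, decide whether we are the last man in the cluster,
 * quiesce the GIC CPU interface, exit coherency with the appropriate
 * cache flush, and finally enter wfi so the power controller can take
 * over.  A non-zero residency hint is only used for the cluster
 * power-down decision below.
 */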
static void tc2_pm_down(u64 residency)
{
        unsigned int mpidr, cpu, cluster;
        bool last_man = false, skip_wfi = false;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
               cpu >= vexpress_spc_get_nb_cpus(cluster));

        __mcpm_cpu_going_down(cpu, cluster);

        arch_spin_lock(&tc2_pm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
        tc2_pm_use_count[cpu][cluster]--;
        if (tc2_pm_use_count[cpu][cluster] == 0) {
                vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
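                /*
                 * Power the cluster down only if no CPU in it still
                 * holds a reference and the expected residency (when
                 * given) makes it worthwhile.  The 5000 cut-off is
                 * taken from the code as-is; the unit of the residency
                 * argument (presumably microseconds) is defined by the
                 * MCPM caller.
                 */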
                if (!tc2_pm_use_count[0][cluster] &&
                    !tc2_pm_use_count[1][cluster] &&
                    !tc2_pm_use_count[2][cluster] &&
                    (!residency || residency > 5000)) {
                        vexpress_spc_powerdown_enable(cluster, 1);
                        vexpress_spc_set_global_wakeup_intr(1);
                        last_man = true;
                }
        } else if (tc2_pm_use_count[cpu][cluster] == 1) {
                /*
                 * A power_up request went ahead of us.
                 * Even if we do not want to shut this CPU down,
                 * the caller expects the same state as if the WFI
                 * had been aborted.  So let's continue with cache cleaning.
                 */
                skip_wfi = true;
        } else
                BUG();

        /*
         * If the CPU is committed to power down, make sure
         * the power controller will be in charge of waking it
         * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU
         * interface to the CPU by disabling the GIC CPU interface,
         * to prevent wfi from completing execution behind the
         * power controller's back.
         */
        if (!skip_wfi)
                gic_cpu_if_down();

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&tc2_pm_lock);

                if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
                        /*
                         * On the Cortex-A15 we need to disable
                         * L2 prefetching before flushing the cache.
                         */
                        asm volatile(
                        "mcr    p15, 1, %0, c15, c0, 3 \n\t"
                        "isb    \n\t"
                        "dsb    "
                        : : "r" (0x400) );
                }

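                /*
                 * Last man: flush and disable the whole cache
                 * hierarchy and exit the SMP coherency domain before
                 * the CCI port is cut off below.
                 */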
                v7_exit_coherency_flush(all);

                cci_disable_port_by_cpu(mpidr);

                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                /*
                 * If we were the last man but failed to enter the
                 * critical section above, undo the last-man setup.
                 */
                if (last_man) {
                        vexpress_spc_powerdown_enable(cluster, 0);
                        vexpress_spc_set_global_wakeup_intr(0);
                }

                arch_spin_unlock(&tc2_pm_lock);

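                /*
                 * Not the last man: the cluster stays up, so flushing
                 * to the Level of Unification Inner Shareable is
                 * sufficient before this CPU exits coherency.
                 */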
                v7_exit_coherency_flush(louis);
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (!skip_wfi)
                wfi();

        /* Not dead at this point?  Let our caller cope. */
}

static void tc2_pm_power_down(void)
{
        tc2_pm_down(0);
}

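/*
 * Suspend differs from power_down only in where the CPU resumes:
 * the SPC resume register is pointed at the TC2-specific tc2_resume
 * entry instead of the generic MCPM entry point programmed by
 * tc2_pm_power_up(), and the residency hint is passed down.
 */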
static void tc2_pm_suspend(u64 residency)
{
        extern void tc2_resume(void);
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        vexpress_spc_write_resume_reg(cluster, cpu,
                                      virt_to_phys(tc2_resume));

        tc2_pm_down(residency);
}

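/*
 * Called on the way back up: cancel any cluster power-down setup if we
 * are the first CPU up, mark this CPU as up again, and disarm the
 * wake-up IRQ and resume address that were programmed on the way down.
 */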
static void tc2_pm_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;
        unsigned long flags;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
               cpu >= vexpress_spc_get_nb_cpus(cluster));

        local_irq_save(flags);
        arch_spin_lock(&tc2_pm_lock);

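        /*
         * First CPU up in the cluster: revert the last-man setup from
         * tc2_pm_down() so the cluster is no longer allowed to power
         * off behind us.
         */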
        if (!tc2_pm_use_count[0][cluster] &&
            !tc2_pm_use_count[1][cluster] &&
            !tc2_pm_use_count[2][cluster]) {
                vexpress_spc_powerdown_enable(cluster, 0);
                vexpress_spc_set_global_wakeup_intr(0);
        }

        if (!tc2_pm_use_count[cpu][cluster])
                tc2_pm_use_count[cpu][cluster] = 1;

        vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
        vexpress_spc_write_resume_reg(cluster, cpu, 0);

        arch_spin_unlock(&tc2_pm_lock);
        local_irq_restore(flags);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
        .power_up       = tc2_pm_power_up,
        .power_down     = tc2_pm_power_down,
        .suspend        = tc2_pm_suspend,
        .powered_up     = tc2_pm_powered_up,
};

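/*
 * Mark the boot CPU as up: it holds a reference on itself but never
 * went through tc2_pm_power_up().
 */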
static void __init tc2_pm_usage_count_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
               cpu >= vexpress_spc_get_nb_cpus(cluster));

        tc2_pm_use_count[cpu][cluster] = 1;
}

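/*
 * Low-level power-up setup callback for mcpm_sync_init(), implemented
 * in assembly elsewhere; at the cluster affinity level it is expected
 * to re-enable CCI snoops for the incoming cluster before any cached
 * accesses are made.
 */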
extern void tc2_pm_power_up_setup(unsigned int affinity_level);

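/*
 * Register with MCPM early in boot.  When PSCI firmware is available
 * it is in charge of CPU power management, so the native backend bows
 * out with -ENODEV.
 */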
static int __init tc2_pm_init(void)
{
        int ret;

        ret = psci_probe();
        if (!ret) {
                pr_debug("psci found. Aborting native init\n");
                return -ENODEV;
        }

        if (!vexpress_spc_check_loaded())
                return -ENODEV;

        tc2_pm_usage_count_init();

        ret = mcpm_platform_register(&tc2_pm_power_ops);
        if (!ret)
                ret = mcpm_sync_init(tc2_pm_power_up_setup);
        if (!ret)
                pr_info("TC2 power management initialized\n");
        return ret;
}

early_initcall(tc2_pm_init);