/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:	Nicolas Pitre, October 2012
 * Copyright:	(C) 2012  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:	(C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <mach/motherboard.h>

#include <linux/vexpress.h>
#include <linux/arm-cci.h>

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
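
/*
 * Per-CPU, per-cluster use counts, indexed [cpu][cluster].  TC2 has two
 * clusters (Cortex-A15 and Cortex-A7) with at most 3 CPUs per cluster,
 * hence the [3][2] dimensions below.
 */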
static int tc2_pm_use_count[3][2];
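
/*
 * MCPM low-level op: called on a running CPU, with IRQs enabled, to bring
 * another CPU online.  Returns 0 on success or -EINVAL for a nonexistent
 * cpu/cluster combination.
 */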
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster))
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	/* First CPU coming up: make sure the cluster can't power down. */
	if (!tc2_pm_use_count[0][cluster] &&
	    !tc2_pm_use_count[1][cluster] &&
	    !tc2_pm_use_count[2][cluster])
		vexpress_spc_powerdown_enable(cluster, 0);

	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		/* Point the boot address at the MCPM entry and arm the wakeup IRQ. */
		vexpress_spc_write_bxaddr_reg(cluster, cpu,
					      virt_to_phys(mcpm_entry_point));
		vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}
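
/*
 * MCPM low-level op: runs on the CPU being shut down, entered with IRQs
 * already disabled by the MCPM core.  The last CPU down in a cluster
 * (the "last man") additionally disables CCI snoops and allows the whole
 * cluster to be powered off.
 */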
static void tc2_pm_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
		if (!tc2_pm_use_count[0][cluster] &&
		    !tc2_pm_use_count[1][cluster] &&
		    !tc2_pm_use_count[2][cluster]) {
			vexpress_spc_powerdown_enable(cluster, 1);
			vexpress_spc_set_global_wakeup_intr(1);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);

		set_cr(get_cr() & ~CR_C);	/* disable D-cache allocation */
		flush_cache_all();		/* clean the whole cache hierarchy */
		asm volatile ("clrex");
		set_auxcr(get_auxcr() & ~(1 << 6));	/* exit coherency: clear ACTLR.SMP */

		disable_cci(cluster);

		/*
		 * Ensure that both C & I bits are disabled in the SCTLR
		 * before disabling ACE snoops. This ensures that no
		 * coherency traffic will originate from this cpu after
		 * ACE snoops are turned off.
		 */
		cpu_proc_fin();

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/*
		 * If last man then undo any setup done previously.
		 */
		if (last_man) {
			vexpress_spc_powerdown_enable(cluster, 0);
			vexpress_spc_set_global_wakeup_intr(0);
		}

		arch_spin_unlock(&tc2_pm_lock);

		set_cr(get_cr() & ~CR_C);
		flush_cache_louis();	/* clean this CPU's own caches only */
		asm volatile ("clrex");
		set_auxcr(get_auxcr() & ~(1 << 6));
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}
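
/*
 * MCPM low-level op: runs on a CPU once it is back up and coherent again,
 * to undo the wakeup IRQ and boot-address setup performed on the way down.
 */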
static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

	/* First CPU back up: re-enable the cluster and stop global wakeups. */
	if (!tc2_pm_use_count[0][cluster] &&
	    !tc2_pm_use_count[1][cluster] &&
	    !tc2_pm_use_count[2][cluster]) {
		vexpress_spc_powerdown_enable(cluster, 0);
		vexpress_spc_set_global_wakeup_intr(0);
	}

	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

	/* Disarm our wakeup IRQ and clear the boot address register. */
	vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
	vexpress_spc_write_bxaddr_reg(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}
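
/*
 * These ops are called back by the MCPM core (mcpm_cpu_power_up(),
 * mcpm_cpu_power_down() and mcpm_cpu_powered_up()) once registered
 * in tc2_pm_init() below.
 */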
static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up	= tc2_pm_power_up,
	.power_down	= tc2_pm_power_down,
	.powered_up	= tc2_pm_powered_up,
};

static void __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 3 || cluster >= 2);
	/* This (boot) CPU is obviously already up. */
	tc2_pm_use_count[cpu][cluster] = 1;
}
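
/*
 * Cluster/CPU setup hook passed to mcpm_sync_init(); it runs during early
 * wake-up before the MMU is enabled, so it is defined outside this file
 * (presumably in low-level assembly).
 */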
extern void tc2_pm_power_up_setup(unsigned int affinity_level);

static int __init tc2_pm_init(void)
{
	int ret;

	if (!vexpress_spc_check_loaded())
		return -ENODEV;

	tc2_pm_usage_count_init();

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret)
		ret = mcpm_sync_init(tc2_pm_power_up_setup);
	if (!ret)
		pr_info("TC2 power management initialized\n");
	return ret;
}

early_initcall(tc2_pm_init);