/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:	Nicolas Pitre, May 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/psci.h>

#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30
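
/*
 * Layout of the RST_HOLDx registers, as inferred from the code below:
 * bits [3:0] and [7:4] carry two per-CPU reset controls, and bit 8
 * carries a cluster-wide reset.
 */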

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
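/*
 * dcscb_use_count[cpu][cluster] counts nested power_up/power_down
 * requests per CPU; dcscb_allcpus_mask[cluster] is a bitmask of all
 * CPUs present in a cluster, decoded from DCS_CFG_R at init time.
 */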
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];

static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);
	unsigned int all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= 4 || cluster >= 2)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= all_mask;
		}
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}

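/*
 * Powering down follows MCPM's "last man" protocol: the last CPU to go
 * down in a cluster must flush all cache levels and disable the
 * cluster's CCI port, while any other CPU only cleans its own caches.
 */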
static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
	bool last_man = false, skip_wfi = false;

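	/* Find our physical CPU and cluster from the MPIDR affinity fields. */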
	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);
	all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&dcscb_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
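		/*
		 * If every CPU in the cluster is now held by one of the two
		 * per-CPU reset bits, we are the last man: assert the
		 * cluster-wide reset as well.
		 */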
		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&dcscb_lock);

		/* Flush all cache levels for this cluster. */
		v7_exit_coherency_flush(all);

		/*
		 * This is a harmless no-op.  On platforms with a real
		 * outer cache this might either be needed or not,
		 * depending on where the outer cache sits.
		 */
		outer_flush_all();

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&dcscb_lock);

		/* Disable and flush the local CPU cache. */
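		/*
		 * "louis" here means Level of Unification Inner Shareable:
		 * only this CPU's own cache levels are flushed.
		 */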
		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}

static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};

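/*
 * The boot path brings CPUs up without going through dcscb_power_up(),
 * so seed the use count of the CPU running the init code to 1.
 */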
static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);
	dcscb_use_count[cpu][cluster] = 1;
}

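/*
 * Early power-up setup hook passed to mcpm_sync_init(); it is written in
 * assembly (dcscb_setup.S in this directory) because it runs before the
 * MMU and coherency are enabled.
 */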
extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	ret = psci_probe();
	if (!ret) {
		pr_debug("psci found. Aborting native init\n");
		return -ENODEV;
	}

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
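	/*
	 * DCS_CFG_R holds the CPU count of each cluster in a 4-bit field:
	 * bits [19:16] for cluster 0 and bits [23:20] for cluster 1.  Turn
	 * each count into a bitmask covering all CPUs in that cluster.
	 */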
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);