ARM: TC2: basic PM support
author     Nicolas Pitre <nicolas.pitre@linaro.org>
           Sat, 20 Oct 2012 00:48:50 +0000 (20:48 -0400)
committer  Jon Medhurst <tixy@linaro.org>
           Mon, 1 Jul 2013 10:05:12 +0000 (11:05 +0100)
Signed-off-by: Nicolas Pitre <nico@linaro.org>
arch/arm/mach-vexpress/Kconfig
arch/arm/mach-vexpress/Makefile
arch/arm/mach-vexpress/tc2_pm.c [new file with mode: 0644]
arch/arm/mach-vexpress/tc2_pm_setup.S [new file with mode: 0644]

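This patch adds a TC2 backend for the MCPM (multi-cluster power management) layer: tc2_pm.c implements the power_up/power_down/powered_up methods on top of the SPC and CCI, and tc2_pm_setup.S provides the early power-up setup run before the MMU is on. As a rough sketch of how such a backend is driven (assuming the standard <asm/mcpm.h> API of this era; the actual call sites live in the generic MCPM and SMP code, not in this patch):

    #include <asm/mcpm.h>

    /*
     * Illustration only: once tc2_pm_power_ops is registered, the
     * generic layer powers CPUs up and down through the backend.
     */
    static int example_boot_secondary(unsigned int cpu, unsigned int cluster)
    {
            /* ends up in tc2_pm_power_up(cpu, cluster) */
            return mcpm_cpu_power_up(cpu, cluster);
    }

    static void example_cpu_die(void)
    {
            /* runs on the dying CPU; ends up in tc2_pm_power_down()
             * and normally does not return */
            mcpm_cpu_power_down();
    }
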
index 6733f4719317b5de7145e16127f120ffb19c2ff2..6e432b2c90cfd7f7ab1a8ff8950d14e045e9466d 100644 (file)
@@ -71,4 +71,12 @@ config ARCH_VEXPRESS_TC2_PM
 config ARCH_VEXPRESS_CA9X4
        bool "Versatile Express Cortex-A9x4 tile"
 
+config ARCH_VEXPRESS_TC2
+       bool "TC2 cluster management"
+       depends on MCPM
+       select ARM_SPC
+       select ARM_CCI
+       help
+         Support for CPU and cluster power management on TC2.
+
 endmenu
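
The new Kconfig entry depends on the MCPM layer and pulls in the SPC and CCI drivers via select. A configuration enabling it would contain something like this (sketch, assuming the referenced options exist under these names):

    CONFIG_MCPM=y
    CONFIG_ARCH_VEXPRESS_TC2=y
    # ARM_SPC and ARM_CCI are selected automatically
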
index 1f57882b46c7df013566f6a5ef3381302ec72b89..9650cc4eec19c3dc0db5e78ae63a8fdc54bcf054 100644 (file)
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
 
 obj-y                                  := v2m.o
 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4)      += ct-ca9x4.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2)                += tc2_pm.o tc2_pm_setup.o
 obj-$(CONFIG_SMP)                      += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)              += hotplug.o
 obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM)     += cpuidle-tc2.o hotplug-asm.o tc2-sleep.o
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
new file mode 100644 (file)
index 0000000..4be2d73
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
+ *
+ * Created by: Nicolas Pitre, October 2012
+ * Copyright:  (C) 2012  Linaro Limited
+ *
+ * Some portions of this file were originally written by Achin Gupta
+ * Copyright:   (C) 2012  ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+
+#include <mach/motherboard.h>
+
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number, which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static int tc2_pm_use_count[3][2];     /* [cpu][cluster]: up to 3 CPUs per cluster, 2 clusters */
+
+static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
+{
+       pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+       if (cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster))
+               return -EINVAL;
+
+       /*
+        * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+        * variant exists, we need to disable IRQs manually here.
+        */
+       local_irq_disable();
+       arch_spin_lock(&tc2_pm_lock);
+
+       if (!tc2_pm_use_count[0][cluster] &&
+           !tc2_pm_use_count[1][cluster] &&
+           !tc2_pm_use_count[2][cluster])
+               vexpress_spc_powerdown_enable(cluster, 0);
+
+       tc2_pm_use_count[cpu][cluster]++;
+       if (tc2_pm_use_count[cpu][cluster] == 1) {
+               vexpress_spc_write_bxaddr_reg(cluster, cpu,
+                                             virt_to_phys(mcpm_entry_point));
+               vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
+       } else if (tc2_pm_use_count[cpu][cluster] != 2) {
+               /*
+                * The only possible values are:
+                * 0 = CPU down
+                * 1 = CPU (still) up
+                * 2 = CPU requested to be up before it had a chance
+                *     to actually power itself down.
+                * Any other value is a bug.
+                */
+               BUG();
+       }
+
+       arch_spin_unlock(&tc2_pm_lock);
+       local_irq_enable();
+
+       return 0;
+}
+
+static void tc2_pm_power_down(void)
+{
+       unsigned int mpidr, cpu, cluster;
+       bool last_man = false, skip_wfi = false;
+
+       mpidr = read_cpuid_mpidr();
+       cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+       pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+       BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+       __mcpm_cpu_going_down(cpu, cluster);
+
+       arch_spin_lock(&tc2_pm_lock);
+       BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+       tc2_pm_use_count[cpu][cluster]--;
+       if (tc2_pm_use_count[cpu][cluster] == 0) {
+               vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
+               if (!tc2_pm_use_count[0][cluster] &&
+                   !tc2_pm_use_count[1][cluster] &&
+                   !tc2_pm_use_count[2][cluster]) {
+                       vexpress_spc_powerdown_enable(cluster, 1);
+                       vexpress_spc_set_global_wakeup_intr(1);
+                       last_man = true;
+               }
+       } else if (tc2_pm_use_count[cpu][cluster] == 1) {
+               /*
+                * A power_up request went ahead of us.
+                * Even if we do not want to shut this CPU down,
+                * the caller expects the same state as if the WFI
+                * had been aborted.  So let's continue with cache cleaning.
+                */
+               skip_wfi = true;
+       } else
+               BUG();
+
+       if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+               arch_spin_unlock(&tc2_pm_lock);
+
+               set_cr(get_cr() & ~CR_C);       /* disable D-side caching */
+               flush_cache_all();              /* clean+invalidate all cache levels */
+               asm volatile ("clrex");         /* clear the exclusive monitor */
+               set_auxcr(get_auxcr() & ~(1 << 6)); /* ACTLR.SMP off: leave coherency */
+
+               disable_cci(cluster);
+
+               /*
+                * Ensure that both C & I bits are disabled in the SCTLR
+                * before disabling ACE snoops. This ensures that no
+                * coherency traffic will originate from this cpu after
+                * ACE snoops are turned off.
+                */
+               cpu_proc_fin();
+
+               __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+       } else {
+               /*
+                * If we were the last man but the cluster cannot be
+                * shut down after all, undo the setup done above.
+                */
+               if (last_man) {
+                       vexpress_spc_powerdown_enable(cluster, 0);
+                       vexpress_spc_set_global_wakeup_intr(0);
+               }
+
+               arch_spin_unlock(&tc2_pm_lock);
+
+               set_cr(get_cr() & ~CR_C);       /* disable D-side caching */
+               flush_cache_louis();            /* clean own caches to the LoUIS */
+               asm volatile ("clrex");         /* clear the exclusive monitor */
+               set_auxcr(get_auxcr() & ~(1 << 6)); /* ACTLR.SMP off: leave coherency */
+       }
+
+       __mcpm_cpu_down(cpu, cluster);
+
+       /* Now we are prepared for power-down, do it: */
+       if (!skip_wfi)
+               wfi();
+
+       /* Not dead at this point?  Let our caller cope. */
+}
+
+static void tc2_pm_powered_up(void)
+{
+       unsigned int mpidr, cpu, cluster;
+       unsigned long flags;
+
+       mpidr = read_cpuid_mpidr();
+       cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+       pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+       BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));
+
+       local_irq_save(flags);
+       arch_spin_lock(&tc2_pm_lock);
+
+       if (!tc2_pm_use_count[0][cluster] &&
+           !tc2_pm_use_count[1][cluster] &&
+           !tc2_pm_use_count[2][cluster]) {
+               vexpress_spc_powerdown_enable(cluster, 0);
+               vexpress_spc_set_global_wakeup_intr(0);
+       }
+
+       if (!tc2_pm_use_count[cpu][cluster])
+               tc2_pm_use_count[cpu][cluster] = 1;
+
+       vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
+       vexpress_spc_write_bxaddr_reg(cluster, cpu, 0);
+
+       arch_spin_unlock(&tc2_pm_lock);
+       local_irq_restore(flags);
+}
+
+static const struct mcpm_platform_ops tc2_pm_power_ops = {
+       .power_up       = tc2_pm_power_up,
+       .power_down     = tc2_pm_power_down,
+       .powered_up     = tc2_pm_powered_up,
+};
+
+static void __init tc2_pm_usage_count_init(void)
+{
+       unsigned int mpidr, cpu, cluster;
+
+       mpidr = read_cpuid_mpidr();
+       cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+       pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+       BUG_ON(cpu >= 3 || cluster >= 2);
+       tc2_pm_use_count[cpu][cluster] = 1;
+}
+
+extern void tc2_pm_power_up_setup(unsigned int affinity_level);
+
+static int __init tc2_pm_init(void)
+{
+       int ret;
+
+       if (!vexpress_spc_check_loaded())
+               return -ENODEV;
+
+       tc2_pm_usage_count_init();
+
+       ret = mcpm_platform_register(&tc2_pm_power_ops);
+       if (!ret)
+               ret = mcpm_sync_init(tc2_pm_power_up_setup);
+       if (!ret)
+               pr_info("TC2 power management initialized\n");
+       return ret;
+}
+
+early_initcall(tc2_pm_init);
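
Before the assembly helper, a note on the C side: tc2_pm_power_down() implements the MCPM "last man" pattern, where the final CPU to go down in a cluster also tears down cluster-level state (CCI snoops, SPC wakeup setup). A simplified, self-contained sketch of the accounting (hypothetical names; the real code must additionally win __mcpm_outbound_enter_critical() before acting on the result):

    /* Sketch: use counts are kept per CPU, per cluster. */
    static int use_count[3][2];             /* [cpu][cluster] */

    /* Must be called with the PM lock held by the CPU going down. */
    static bool going_down_is_last_man(unsigned int cpu, unsigned int cluster)
    {
            use_count[cpu][cluster]--;
            /* The cluster may be powered off only when no CPU in it
             * is still accounted as up. */
            return !use_count[0][cluster] &&
                   !use_count[1][cluster] &&
                   !use_count[2][cluster];
    }
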
diff --git a/arch/arm/mach-vexpress/tc2_pm_setup.S b/arch/arm/mach-vexpress/tc2_pm_setup.S
new file mode 100644 (file)
index 0000000..046890e
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-vexpress/tc2_pm_setup.S
+ *
+ * Created by: Nicolas Pitre, October 2012
+ (             (based on dcscb_setup.S by Dave Martin)
+ * Copyright:  (C) 2012  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+
+#define SPC_PHYS_BASE          0x7FFF0000
+
+#define SNOOP_CTL_A15          0x404
+#define SNOOP_CTL_A7           0x504
+
+#define A15_SNOOP_MASK         (0x3 << 7)
+#define A7_SNOOP_MASK          (0x1 << 13)
+
+#define A15_BX_ADDR0           0xB68
+
+
+#define CCI_PHYS_BASE          0x2c090000
+
+#define SLAVE_SNOOPCTL_OFFSET  0
+#define SNOOPCTL_SNOOP_ENABLE  (1 << 0)
+#define SNOOPCTL_DVM_ENABLE    (1 << 1)
+
+#define CCI_STATUS_OFFSET      0xc
+#define STATUS_CHANGE_PENDING  (1 << 0)
+
+#define CCI_SLAVE_OFFSET(n)    (0x1000 + 0x1000 * (n))
+#define CCI_SLAVE_A15          3
+#define CCI_SLAVE_A7           4
+#define CCI_A15_OFFSET         CCI_SLAVE_OFFSET(CCI_SLAVE_A15)
+#define CCI_A7_OFFSET          CCI_SLAVE_OFFSET(CCI_SLAVE_A7)
+
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ */
+
+ENTRY(tc2_pm_power_up_setup)
+
+       cmp     r0, #0                  @ affinity level: 0 = CPU, 1 = cluster
+       beq     2f                      @ CPU level: just clear our BX address
+
+       @ Enable CCI snoops
+       mrc     p15, 0, r0, c0, c0, 5   @ MPIDR
+       ubfx    r0, r0, #8, #4          @ cluster
+       ldr     r3, =CCI_PHYS_BASE + CCI_A15_OFFSET
+       cmp     r0, #0          @ A15 cluster?
+       addne   r3, r3, #CCI_A7_OFFSET - CCI_A15_OFFSET
+
+       @ r3 now points to the correct CCI slave register block
+       ldr     r0, [r3, #SLAVE_SNOOPCTL_OFFSET]
+       orr     r0, r0, #SNOOPCTL_SNOOP_ENABLE | SNOOPCTL_DVM_ENABLE
+       str     r0, [r3, #SLAVE_SNOOPCTL_OFFSET]        @ enable CCI snoops
+
+       @ Wait for snoop control change to complete:
+       ldr     r3, =CCI_PHYS_BASE
+1:     ldr     r0, [r3, #CCI_STATUS_OFFSET]
+       tst     r0, #STATUS_CHANGE_PENDING
+       bne     1b
+
+       bx      lr
+
+2:     @ Clear the BX addr register
+       ldr     r3, =SPC_PHYS_BASE + A15_BX_ADDR0
+       mrc     p15, 0, r0, c0, c0, 5   @ MPIDR
+       ubfx    r1, r0, #8, #4          @ cluster
+       ubfx    r0, r0, #0, #4          @ cpu
+       add     r3, r3, r1, lsl #4
+       mov     r1, #0
+       str     r1, [r3, r0, lsl #2]
+       dsb
+
+       bx      lr
+
+ENDPROC(tc2_pm_power_up_setup)
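
For reference, the cluster-level branch of tc2_pm_power_up_setup is close to the following C (illustration only: the real code runs with the MMU and caches off, before ioremap() is possible, which is why it is written in assembly against physical addresses):

    #include <linux/io.h>

    #define SLAVE_SNOOPCTL_OFFSET   0
    #define SNOOPCTL_SNOOP_ENABLE   (1 << 0)
    #define SNOOPCTL_DVM_ENABLE     (1 << 1)
    #define CCI_STATUS_OFFSET       0xc
    #define STATUS_CHANGE_PENDING   (1 << 0)
    #define CCI_SLAVE_OFFSET(n)     (0x1000 + 0x1000 * (n))

    /* cci = a mapping of CCI_PHYS_BASE; slave_if = 3 for A15, 4 for A7 */
    static void cci_enable_snoops(void __iomem *cci, int slave_if)
    {
            void __iomem *ctl = cci + CCI_SLAVE_OFFSET(slave_if)
                                    + SLAVE_SNOOPCTL_OFFSET;

            writel_relaxed(readl_relaxed(ctl) | SNOOPCTL_SNOOP_ENABLE
                                              | SNOOPCTL_DVM_ENABLE, ctl);

            /* wait for the snoop control change to complete */
            while (readl_relaxed(cci + CCI_STATUS_OFFSET)
                   & STATUS_CHANGE_PENDING)
                    ;
    }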