#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
+#include <asm/psci.h>
#include <mach/motherboard.h>
+#include <mach/tc2.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-static int tc2_pm_use_count[3][2];
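+/*
+ * Use counts for each CPU: the transition from 0 to 1 triggers a
+ * physical power up, and a CPU is only really powered down again
+ * once its count drops back to 0.
+ */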
+static int tc2_pm_use_count[TC2_MAX_CPUS][TC2_MAX_CLUSTERS];
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- if (cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster))
+ if (cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster))
return -EINVAL;
/*
 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 * variant exists, we need to disable IRQs manually here.
 */
local_irq_disable();
arch_spin_lock(&tc2_pm_lock);
tc2_pm_use_count[cpu][cluster]++;
if (tc2_pm_use_count[cpu][cluster] == 1) {
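+ /*
+ * First user of this CPU: have the SPC branch it to the MCPM entry
+ * point when it comes up, and arm its wakeup IRQ.
+ */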
- vexpress_spc_write_bxaddr_reg(cluster, cpu,
+ vexpress_spc_write_resume_reg(cluster, cpu,
virt_to_phys(mcpm_entry_point));
vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 1);
} else if (tc2_pm_use_count[cpu][cluster] != 2) {
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
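+ /* Let the MCPM state machine know this CPU is on its way down. */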
__mcpm_cpu_going_down(cpu, cluster);
} else
BUG();
- gic_cpu_if_down();
+ /*
+ * If the CPU is committed to power down, make sure the power
+ * controller will be in charge of waking it up upon IRQ: IRQ lines
+ * are cut from the GIC CPU interface to the CPU by disabling the
+ * GIC CPU interface, so that wfi cannot complete behind the power
+ * controller's back.
+ */
+ if (!skip_wfi)
+ gic_cpu_if_down();
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
arch_spin_unlock(&tc2_pm_lock);
- set_cr(get_cr() & ~CR_C);
- flush_cache_all();
- asm volatile ("clrex");
- set_auxcr(get_auxcr() & ~(1 << 6));
+ if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+ "isb \n\t"
+ "dsb "
+ : : "r" (0x400) );
+ }
- cci_disable_port_by_cpu(mpidr);
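+ /*
+ * v7_exit_coherency_flush(all) turns off the data cache, flushes
+ * the whole cache hierarchy and takes this CPU out of SMP coherency,
+ * replacing the open-coded set_cr/flush_cache_all/clrex/set_auxcr
+ * sequence.
+ */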
+ v7_exit_coherency_flush(all);
- /*
- * Ensure that both C & I bits are disabled in the SCTLR
- * before disabling ACE snoops. This ensures that no
- * coherency traffic will originate from this cpu after
- * ACE snoops are turned off.
- */
- cpu_proc_fin();
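+ /*
+ * Only once the flush has completed and the CPU has left the
+ * coherency domain is it safe to disable the CCI port: no coherent
+ * traffic can originate from this CPU any more.
+ */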
+ cci_disable_port_by_cpu(mpidr);
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
} else {
arch_spin_unlock(&tc2_pm_lock);
- set_cr(get_cr() & ~CR_C);
- flush_cache_louis();
- asm volatile ("clrex");
- set_auxcr(get_auxcr() & ~(1 << 6));
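+ /*
+ * The cluster stays up, so flushing to the Level of Unification
+ * Inner Shareable (LoUIS) is sufficient here.
+ */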
+ v7_exit_coherency_flush(louis);
}
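+ /* Synchronise with the MCPM state machine: this CPU is now down. */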
__mcpm_cpu_down(cpu, cluster);
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- vexpress_spc_write_bxaddr_reg(cluster, cpu,
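+ /*
+ * Have the SPC resume this CPU through the TC2-specific tc2_resume
+ * entry point when it wakes from suspend.
+ */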
+ vexpress_spc_write_resume_reg(cluster, cpu,
virt_to_phys(tc2_resume));
tc2_pm_down(residency);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cluster >= 2 || cpu >= vexpress_spc_get_nb_cpus(cluster));
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
local_irq_save(flags);
arch_spin_lock(&tc2_pm_lock);
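+ /*
+ * The CPU is functional again: restore its use count, disarm its
+ * SPC wakeup IRQ and clear the stale resume address.
+ */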
tc2_pm_use_count[cpu][cluster] = 1;
vexpress_spc_set_cpu_wakeup_irq(cpu, cluster, 0);
- vexpress_spc_write_bxaddr_reg(cluster, cpu, 0);
+ vexpress_spc_write_resume_reg(cluster, cpu, 0);
arch_spin_unlock(&tc2_pm_lock);
local_irq_restore(flags);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cpu >= 3 || cluster >= 2);
+ BUG_ON(cluster >= TC2_MAX_CLUSTERS ||
+ cpu >= vexpress_spc_get_nb_cpus(cluster));
+
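+ /* The CPU running this init code is alive, so it starts with a use count of 1. */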
tc2_pm_use_count[cpu][cluster] = 1;
}
{
int ret;
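+ /*
+ * When PSCI firmware is present it owns CPU power control, so the
+ * native TC2 backend must not be registered.
+ */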
+ ret = psci_probe();
+ if (!ret) {
+ pr_debug("psci found. Aborting native init\n");
+ return -ENODEV;
+ }
+
if (!vexpress_spc_check_loaded())
return -ENODEV;