ARM: tegra: usb_phy: Correct utmi power off sequence
[firefly-linux-kernel-4.4.55.git] arch/arm/mach-tegra/platsmp.c
index 1c0fd92cab39e44779a3823e89d5248997163a72..659c66967fb5b64b6c3741f97c70003577c243c7 100644
@@ -7,6 +7,8 @@
  *  Copyright (C) 2009 Palm
  *  All Rights Reserved
  *
+ *  Copyright (C) 2010 NVIDIA Corporation
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 #include <linux/jiffies.h>
 #include <linux/smp.h>
 #include <linux/io.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
 
 #include <asm/cacheflush.h>
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/localtimer.h>
+#include <asm/tlbflush.h>
 #include <asm/smp_scu.h>
+#include <asm/cpu.h>
+#include <asm/mmu_context.h>
 
 #include <mach/iomap.h>
 
+#include "power.h"
+
 extern void tegra_secondary_startup(void);
 
 static DEFINE_SPINLOCK(boot_lock);
 static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
 
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct completion, cpu_killed);
+extern void tegra_hotplug_startup(void);
+#endif
+
+static DECLARE_BITMAP(cpu_init_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_init_mask = to_cpumask(cpu_init_bits);
+#define cpu_init_map (*(cpumask_t *)cpu_init_mask)
+
 #define EVP_CPU_RESET_VECTOR \
        (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100)
 #define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
+#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
+       (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
 #define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)
 
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
        trace_hardirqs_off();
-
-       /*
-        * if any interrupts are already enabled for the primary
-        * core (e.g. timer irq), then they will not have been enabled
-        * for us: do so
-        */
        gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x100);
-
        /*
         * Synchronise with the boot thread.
         */
        spin_lock(&boot_lock);
+#ifdef CONFIG_HOTPLUG_CPU
+       cpu_set(cpu, cpu_init_map);
+       INIT_COMPLETION(per_cpu(cpu_killed, cpu));
+#endif
        spin_unlock(&boot_lock);
 }
 
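The cpu_init_map bookkeeping added above records which secondaries have already completed a cold boot, so that boot_secondary (next hunk) can pick the warm entry point tegra_hotplug_startup instead of tegra_secondary_startup. The const-qualified mask with a cast-away-const alias for the single writer is the subtle part; below is a minimal self-contained sketch of the same idiom, with demo_-prefixed names that are illustrative and not part of this patch.

#include <linux/cpumask.h>

/* Read-mostly bitmap published as a const cpumask; the one writer goes
 * through a cast-away-const alias, exactly as cpu_init_map does above. */
static DECLARE_BITMAP(demo_init_bits, CONFIG_NR_CPUS) __read_mostly;
static const struct cpumask *const demo_init_mask = to_cpumask(demo_init_bits);
#define demo_init_map (*(cpumask_t *)demo_init_mask)

static void demo_mark_booted(unsigned int cpu)
{
	/* writer side: called once per CPU, under boot_lock in this patch */
	cpu_set(cpu, demo_init_map);
}

static int demo_was_booted(unsigned int cpu)
{
	/* reader side only ever sees the const view */
	return cpumask_test_cpu(cpu, demo_init_mask);
}
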
@@ -70,27 +89,30 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
         */
        spin_lock(&boot_lock);
 
-
        /* set the reset vector to point to the secondary_startup routine */
+#ifdef CONFIG_HOTPLUG_CPU
+       if (cpumask_test_cpu(cpu, cpu_init_mask))
+               boot_vector = virt_to_phys(tegra_hotplug_startup);
+       else
+#endif
+               boot_vector = virt_to_phys(tegra_secondary_startup);
+
+       smp_wmb();
 
-       boot_vector = virt_to_phys(tegra_secondary_startup);
        old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
        writel(boot_vector, EVP_CPU_RESET_VECTOR);
 
-       /* enable cpu clock on cpu1 */
+       /* enable cpu clock on cpu */
        reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
-       writel(reg & ~(1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+       writel(reg & ~(1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
 
-       reg = (1<<13) | (1<<9) | (1<<5) | (1<<1);
+       reg = 0x1111<<cpu;
        writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
 
-       smp_wmb();
-       flush_cache_all();
-
        /* unhalt the cpu */
-       writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14);
+       writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14 + 0x8*(cpu-1));
 
-       timeout = jiffies + (1 * HZ);
+       timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
                        break;
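
The per-CPU clock and reset handling above generalizes the old CPU1-only constants: bit (8 + cpu) in CLK_CPU_CMPLX gates that CPU's clock, 0x1111 << cpu clears the same four reset lines that (1<<13)|(1<<9)|(1<<5)|(1<<1) cleared for CPU1, and the flow-controller halt register for CPU n sits at 0x14 + 0x8*(n-1). A hedged restatement as helper macros; the values are copied from this hunk, only the DEMO_ names are invented:

/* Values taken from the code above; only the macro names are illustrative. */
#define DEMO_CPU_RESET_MASK(cpu)   (0x1111u << (cpu))   /* the four per-CPU reset lines */
#define DEMO_CPU_CLK_STOP(cpu)     (1u << (8 + (cpu)))  /* CLK_CPU_CMPLX: 1 = clock stopped */
/* Flow-controller halt-events register for secondary CPUs (cpu >= 1). */
#define DEMO_FLOW_HALT_EVENTS(cpu) \
	(IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14 + 0x8 * ((cpu) - 1))

With these, the unhalt write above reads writel(0, DEMO_FLOW_HALT_EVENTS(cpu)) and the reset deassert reads writel(DEMO_CPU_RESET_MASK(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR).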
@@ -142,6 +164,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        for (i = 0; i < max_cpus; i++)
                set_cpu_present(i, true);
 
+#ifdef CONFIG_HOTPLUG_CPU
+       for_each_present_cpu(i) {
+               init_completion(&per_cpu(cpu_killed, i));
+       }
+#endif
+
        /*
         * Initialise the SCU if there are more than one CPU and let
         * them know where to start. Note that, on modern versions of
@@ -154,3 +182,71 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                scu_enable(scu_base);
        }
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+extern void vfp_sync_state(struct thread_info *thread);
+
+void __cpuinit secondary_start_kernel(void);
+
+int platform_cpu_kill(unsigned int cpu)
+{
+       unsigned int reg;
+       int e;
+
+       e = wait_for_completion_timeout(&per_cpu(cpu_killed, cpu), 100);
+       printk(KERN_NOTICE "CPU%u: %s shutdown\n", cpu, (e) ? "clean":"forced");
+
+       if (e) {
+               do {
+                       reg = readl(CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+                       cpu_relax();
+               } while (!(reg & (1<<cpu)));
+       } else {
+               writel(0x1111<<cpu, CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+               /* put flow controller in WAIT_EVENT mode */
+               writel(2<<29, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE)+0x14 + 0x8*(cpu-1));
+       }
+       spin_lock(&boot_lock);
+       reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+       writel(reg | (1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+       spin_unlock(&boot_lock);
+       return e;
+}
+
+void platform_cpu_die(unsigned int cpu)
+{
+#ifdef DEBUG
+       unsigned int this_cpu = hard_smp_processor_id();
+
+       if (cpu != this_cpu) {
+               printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
+                          this_cpu, cpu);
+               BUG();
+       }
+#endif
+
+       gic_cpu_exit(0);
+       barrier();
+       complete(&per_cpu(cpu_killed, cpu));
+       flush_cache_all();
+       barrier();
+       __cortex_a9_save(0);
+
+       /* return happens from __cortex_a9_restore */
+       barrier();
+       writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+       /*
+        * we don't allow CPU 0 to be shutdown (it is still too special
+        * e.g. clock tick interrupts)
+        */
+       if (unlikely(!tegra_context_area))
+               return -ENXIO;
+
+       return cpu == 0 ? -EPERM : 0;
+}
+#endif
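
For reference, the shutdown path above is a two-sided handshake: the dying CPU signals its per-CPU cpu_killed completion, flushes its caches, and drops into reset via __cortex_a9_save(), writing its ID back to EVP_CPU_RESET_VECTOR only once it is later restored; the surviving CPU waits up to 100 jiffies for the completion and, on timeout, forces the reset and the flow-controller WAIT_EVENT state itself before gating the clock. A minimal sketch of just the completion half, with demo_-prefixed names that are illustrative and not part of the patch:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct completion, demo_cpu_killed);

/* Each completion must be init_completion()'d once per present CPU, as
 * smp_prepare_cpus does above, and re-armed with INIT_COMPLETION() on
 * every secondary boot, as platform_secondary_init does above. */

/* Dying CPU: called with the GIC interface already disabled, right before
 * saving state and entering reset. */
static void demo_cpu_die_notify(unsigned int cpu)
{
	complete(&per_cpu(demo_cpu_killed, cpu));
}

/* Surviving CPU: wait up to 100 jiffies (the value platform_cpu_kill uses);
 * a timeout means the caller must force the CPU into reset itself. */
static int demo_cpu_kill_wait(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&per_cpu(demo_cpu_killed, cpu), 100))
		return -ETIMEDOUT;
	return 0;
}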