Merge remote-tracking branch 'lsk/v3.10/topic/arm64-misc' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] arch/arm64/kernel/smp.c
index 9c93e126328cfaa04ffdaa8612a43e4d9d09a6aa..6b000121618997128608295ae569b2ab2e6b32a6 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
+#include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/arm-ipi.h>
+
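
Note: the CREATE_TRACE_POINTS define must come before the event-header include in exactly one translation unit; that include then expands the header's TRACE_EVENT() definitions into real tracepoint bodies, while every other user includes the header bare. A minimal sketch of the convention (no event names are assumed from arm-ipi.h):

/* In exactly one .c file: instantiate the tracepoint bodies. */
#define CREATE_TRACE_POINTS
#include <trace/events/arm-ipi.h>

/* In every other user: a plain include, then calls to the
 * trace_<event>() hooks generated from the header's TRACE_EVENT()s. */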
 /*
  * As of 2.5, kernels no longer have an init_tasks structure,
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_TIMER,
 };
 
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not.  This is necessary for the hotplug code to work
- * reliably.
- */
-static void __cpuinit write_pen_release(u64 val)
-{
-       void *start = (void *)&secondary_holding_pen_release;
-       unsigned long size = sizeof(secondary_holding_pen_release);
-
-       secondary_holding_pen_release = val;
-       __flush_dcache_area(start, size);
-}
-
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
  * This also gives us the initial stack to use for this CPU.
  */
 static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-       unsigned long timeout;
-
-       /*
-        * Set synchronisation state between this boot processor
-        * and the secondary one
-        */
-       raw_spin_lock(&boot_lock);
-
-       /*
-        * Update the pen release flag.
-        */
-       write_pen_release(cpu_logical_map(cpu));
+       if (cpu_ops[cpu]->cpu_boot)
+               return cpu_ops[cpu]->cpu_boot(cpu);
 
-       /*
-        * Send an event, causing the secondaries to read pen_release.
-        */
-       sev();
-
-       timeout = jiffies + (1 * HZ);
-       while (time_before(jiffies, timeout)) {
-               if (secondary_holding_pen_release == INVALID_HWID)
-                       break;
-               udelay(10);
-       }
-
-       /*
-        * Now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
-       raw_spin_unlock(&boot_lock);
-
-       return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+       return -EOPNOTSUPP;
 }
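
boot_secondary() now simply dispatches through the per-CPU cpu_ops table populated from the device tree. In the corresponding upstream series the entries are struct cpu_operations; a sketch of its shape follows (the exact field set in this tree may differ):

/* One instance per enable method (e.g. spin-table, psci). */
struct cpu_operations {
	const char	*name;		/* matched against DT "enable-method" */
	int		(*cpu_init)(struct device_node *, unsigned int);
	int		(*cpu_prepare)(unsigned int);	/* pre-boot sanity checks */
	int		(*cpu_boot)(unsigned int);	/* kick the CPU into the kernel */
	void		(*cpu_postboot)(void);		/* runs on the freshly booted CPU */
#ifdef CONFIG_HOTPLUG_CPU
	int		(*cpu_disable)(unsigned int cpu);	/* may veto unplug */
	void		(*cpu_die)(unsigned int cpu);		/* must not return */
#endif
};

extern const struct cpu_operations *cpu_ops[NR_CPUS];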
 
 static DECLARE_COMPLETION(cpu_running);
@@ -158,6 +117,11 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+       store_cpu_topology(cpuid);
+}
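
smp_store_cpu_info() is now just a hook into the topology code: store_cpu_topology() records where the calling CPU sits in the thread/core/cluster hierarchy, typically derived from the affinity fields of MPIDR_EL1. A purely illustrative sketch of that derivation (not the tree's actual helper):

/* Hypothetical helper: MPIDR_EL1 packs affinity levels 0..2 in its
 * low bytes; topology code maps them to thread/core/cluster IDs. */
static void example_read_affinity(unsigned int cpu)
{
	u64 mpidr = read_cpuid_mpidr();

	pr_debug("CPU%u: aff0=%llu aff1=%llu aff2=%llu\n", cpu,
		 mpidr & 0xff, (mpidr >> 8) & 0xff, (mpidr >> 16) & 0xff);
}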
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPU's
  * idle thread stack, but a set of temporary page tables.
@@ -167,8 +131,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();
 
-       printk("CPU%u: Booted secondary processor\n", cpu);
-
        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
@@ -177,6 +139,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
 
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+       printk("CPU%u: Booted secondary processor\n", cpu);
+
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
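
The set_my_cpu_offset() call must run before anything on the secondary touches per-CPU state, which is why the boot banner now prints after it. On arm64 the offset is stashed in the TPIDR_EL1 system register; a sketch of the accessor pair, assuming the usual arm64 percpu implementation (cf. asm/percpu.h):

static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile("msr tpidr_el1, %0" :: "r" (off));
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	asm("mrs %0, tpidr_el1" : "=r" (off));
	return off;
}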
@@ -187,17 +152,15 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        preempt_disable();
        trace_hardirqs_off();
 
-       /*
-        * Let the primary processor know we're out of the
-        * pen, then head off into the C entry point
-        */
-       write_pen_release(INVALID_HWID);
+       if (cpu_ops[cpu]->cpu_postboot)
+               cpu_ops[cpu]->cpu_postboot();
 
        /*
-        * Synchronise with the boot thread.
+        * Enable GIC and timers.
         */
-       raw_spin_lock(&boot_lock);
-       raw_spin_unlock(&boot_lock);
+       notify_cpu_starting(cpu);
+
+       smp_store_cpu_info(cpu);
 
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -207,10 +170,5 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
-       /*
-        * Enable GIC and timers.
-        */
-       notify_cpu_starting(cpu);
-
        local_irq_enable();
        local_fiq_enable();
@@ -221,42 +182,117 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        cpu_startup_entry(CPUHP_ONLINE);
 }
 
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
 {
-       unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+       /*
+        * If we don't have a cpu_die method, abort before we reach the point
+        * of no return. CPU0 may not have a cpu_ops, so test for it.
+        */
+       if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+               return -EOPNOTSUPP;
 
-       pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-               num_online_cpus(), bogosum / (500000/HZ),
-               (bogosum / (5000/HZ)) % 100);
+       /*
+        * We may need to abort a hot unplug for some other mechanism-specific
+        * reason.
+        */
+       if (cpu_ops[cpu]->cpu_disable)
+               return cpu_ops[cpu]->cpu_disable(cpu);
+
+       return 0;
 }
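
A backend's optional cpu_disable hook is its chance to veto an unplug before the point of no return. A hypothetical backend that cannot power the boot CPU back on might refuse like this (illustrative only, not tree code):

static int example_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;	/* firmware cannot re-start CPU 0 */
	return 0;
}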
 
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpu_disable(void)
 {
-}
+       unsigned int cpu = smp_processor_id();
+       int ret;
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+       ret = op_cpu_disable(cpu);
+       if (ret)
+               return ret;
 
-static const struct smp_enable_ops *enable_ops[] __initconst = {
-       &smp_spin_table_ops,
-       &smp_psci_ops,
-       NULL,
-};
+       /*
+        * Take this CPU offline.  Once we clear this, we can't return,
+        * and we must not schedule until we're ready to give up the CPU.
+        */
+       set_cpu_online(cpu, false);
 
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+       /*
+        * OK - migrate IRQs away from this CPU
+        */
+       migrate_irqs();
 
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
-{
-       const struct smp_enable_ops **ops = enable_ops;
+       /*
+        * Remove this CPU from the vm mask set of all processes.
+        */
+       clear_tasks_mm_cpumask(cpu);
 
-       while (*ops) {
-               if (!strcmp(name, (*ops)->name))
-                       return *ops;
+       return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
 
-               ops++;
+/*
+ * called on the thread that is asking for a CPU to be shut down;
+ * waits until shutdown has completed, or times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+       if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+               pr_crit("CPU%u: cpu didn't die\n", cpu);
+               return;
        }
+       pr_notice("CPU%u: shutdown\n", cpu);
+}
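
The five-second bound relies on the standard completion API: cpu_die() below signals cpu_died from the dying CPU, while wait_for_completion_timeout() returns the remaining jiffies on success and 0 on timeout. The pattern in isolation (names hypothetical):

static DECLARE_COMPLETION(done);

static void waiter(void)
{
	/* zero return means the 5000ms window expired */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(5000)))
		pr_crit("timed out\n");
}

static void signaller(void)	/* runs on another CPU */
{
	complete(&done);
}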
 
-       return NULL;
+/*
+ * Called from the idle thread for the CPU which has been shut down.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+       unsigned int cpu = smp_processor_id();
+
+       idle_task_exit();
+
+       local_irq_disable();
+
+       /* Tell __cpu_die() that this CPU is now safe to dispose of */
+       complete(&cpu_died);
+
+       /*
+        * Actually shut down the CPU. This must never fail. The specific hotplug
+        * mechanism must perform all required cache maintenance to ensure that
+        * no dirty lines are lost in the process of shutting down the CPU.
+        */
+       cpu_ops[cpu]->cpu_die(cpu);
+
+       BUG();
 }
+#endif
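
The cpu_die method carries a hard contract, restated in the comment above: perform whatever cache maintenance the mechanism requires and never return, hence the BUG() after the call. A hypothetical backend honouring that contract when firmware power-off is unavailable (the real spin-table/PSCI backends live outside this file):

/* Hypothetical example: park the core forever; any required cache
 * maintenance is assumed to be handled by the mechanism itself. */
static void example_cpu_die(unsigned int cpu)
{
	while (1)
		cpu_relax();	/* never returns to cpu_die() */
}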
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+       unsigned long bogosum = loops_per_jiffy * num_online_cpus();
+
+       pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+               num_online_cpus(), bogosum / (500000/HZ),
+               (bogosum / (5000/HZ)) % 100);
+}
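
The pr_info() arithmetic prints bogosum * HZ / 500000 as a whole part plus two decimal places; it assumes HZ divides 500000 and 5000 exactly, which holds for the common HZ values. A worked example with assumed numbers:

static void example_bogomips(void)
{
	/* Illustrative: HZ = 100, four CPUs online, and
	 * loops_per_jiffy = 1200000, i.e. 240.00 BogoMIPS per CPU. */
	unsigned long bogosum = 4UL * 1200000;			/* 4800000          */
	unsigned long whole = bogosum / (500000 / 100);		/* 4800000/5000=960 */
	unsigned long frac = (bogosum / (5000 / 100)) % 100;	/* 96000%100 = 0    */

	/* prints "(960.00 BogoMIPS)" for the whole system */
	pr_info("(%lu.%02lu BogoMIPS)\n", whole, frac);
}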
+
+void __init smp_prepare_boot_cpu(void)
+{
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
 /*
  * Enumerate the possible CPU set from the device tree and build the
@@ -265,9 +301,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
  */
 void __init smp_init_cpus(void)
 {
-       const char *enable_method;
        struct device_node *dn = NULL;
-       int i, cpu = 1;
+       unsigned int i, cpu = 1;
        bool bootcpu_valid = false;
 
        while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -336,25 +371,10 @@ void __init smp_init_cpus(void)
                if (cpu >= NR_CPUS)
                        goto next;
 
-               /*
-                * We currently support only the "spin-table" enable-method.
-                */
-               enable_method = of_get_property(dn, "enable-method", NULL);
-               if (!enable_method) {
-                       pr_err("%s: missing enable-method property\n",
-                               dn->full_name);
+               if (cpu_read_ops(dn, cpu) != 0)
                        goto next;
-               }
-
-               smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
 
-               if (!smp_enable_ops[cpu]) {
-                       pr_err("%s: invalid enable-method property: %s\n",
-                              dn->full_name, enable_method);
-                       goto next;
-               }
-
-               if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+               if (cpu_ops[cpu]->cpu_init(dn, cpu))
                        goto next;
 
                pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -384,8 +404,12 @@ next:
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int cpu, err;
-       unsigned int ncores = num_possible_cpus();
+       int err;
+       unsigned int cpu, ncores = num_possible_cpus();
+
+       init_cpu_topology();
+
+       smp_store_cpu_info(smp_processor_id());
 
        /*
         * are we trying to boot more cores than exist?
@@ -412,10 +436,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                if (cpu == smp_processor_id())
                        continue;
 
-               if (!smp_enable_ops[cpu])
+               if (!cpu_ops[cpu])
                        continue;
 
-               err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+               err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;
 
@@ -446,6 +470,7 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_TIMER, "Timer broadcast interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -455,7 +480,7 @@ void show_ipi_list(struct seq_file *p, int prec)
        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
                           prec >= 4 ? " " : "");
-               for_each_present_cpu(cpu)
+               for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, "      %s\n", ipi_types[i]);
@@ -531,6 +556,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+       case IPI_TIMER:
+               irq_enter();
+               tick_receive_broadcast();
+               irq_exit();
+               break;
+#endif
+
        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
@@ -543,6 +576,13 @@ void smp_send_reschedule(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_TIMER);
+}
+#endif
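
tick_broadcast() is the arch half of the generic clockevents broadcast scheme: the timekeeping core calls it to kick CPUs whose local timer has stopped in deep idle, and each target answers the IPI_TIMER in handle_IPI() above with tick_receive_broadcast(), which runs the local tick device's event handler. A local timer driver opts in by flagging that it stops in low-power states; an illustrative fragment:

/* Illustrative: CLOCK_EVT_FEAT_C3STOP tells the core this per-CPU
 * timer dies in deep idle, steering wakeups onto the broadcast path. */
static struct clock_event_device example_local_timer = {
	.name		= "example-local-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
	.rating		= 300,
};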
+
 void smp_send_stop(void)
 {
        unsigned long timeout;