powerpc/smp: Don't expose per-cpu "cpu_state" array
[firefly-linux-kernel-4.4.55.git] arch/powerpc/kernel/smp.c
index 981360509172472a044eac1b9ec50019def24b9c..d7f8cc18ae05f7bf4ae9f3d9329cc44d217fb9f5 100644
@@ -305,7 +305,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +317,8 @@ int generic_cpu_disable(void)
        set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
        vdso_data->processorCount--;
-       fixup_irqs(cpu_online_mask);
-#endif
-       return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-       /* Do the normal bootup if we haven't
-        * already bootstrapped. */
-       if (system_state != SYSTEM_RUNNING)
-               return -ENOSYS;
-
-       /* get the target out of it's holding state */
-       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-       smp_wmb();
-
-       while (!cpu_online(cpu))
-               cpu_relax();
-
-#ifdef CONFIG_PPC64
-       fixup_irqs(cpu_online_mask);
-       /* counter the irq disable in fixup_irqs */
-       local_irq_enable();
 #endif
+       migrate_irqs();
        return 0;
 }
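For context, this is how generic_cpu_disable() reads once the hunk above is applied: interrupt migration is no longer PPC64-only, with migrate_irqs() replacing fixup_irqs(cpu_online_mask). The opening boot-CPU check is not part of the hunk and is reproduced here only as a best-effort sketch of the surrounding file:

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* Not shown in the hunk: the boot CPU cannot be unplugged */
	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Move IRQs targeted at this CPU to the remaining online CPUs */
	migrate_irqs();
	return 0;
}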
 
@@ -362,32 +340,26 @@ void generic_mach_cpu_die(void)
        unsigned int cpu;
 
        local_irq_disable();
+       idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
-       set_cpu_online(cpu, true);
-       local_irq_enable();
 }
-#endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+void generic_set_cpu_dead(unsigned int cpu)
 {
-       if (smp_ops && smp_ops->cpu_enable)
-               return smp_ops->cpu_enable(cpu);
-
-       return -ENOSYS;
+       per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
+#endif
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
        int c;
 
        secondary_ti = current_set[cpu];
-       if (!cpu_enable(cpu))
-               return 0;
 
        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
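With cpu_state now static to this file, platform hotplug code is expected to go through the new generic_set_cpu_dead() accessor instead of writing per_cpu(cpu_state, cpu) directly. A minimal sketch of a hypothetical platform hot-unplug path using it (the function name is made up):

/* Hypothetical platform hook, for illustration only */
static void example_platform_cpu_dead(unsigned int cpu)
{
	/* Previously open-coded as: per_cpu(cpu_state, cpu) = CPU_DEAD; */
	generic_set_cpu_dead(cpu);

	/* ... platform-specific power-down of the offlined CPU ... */
}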
@@ -502,7 +474,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
        unsigned int cpu = smp_processor_id();
        struct device_node *l2_cache;
@@ -558,7 +530,8 @@ int __devinit start_secondary(void *unused)
        local_irq_enable();
 
        cpu_idle();
-       return 0;
+
+       BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +558,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
        free_cpumask_var(old_mask);
 
+       if (smp_ops && smp_ops->bringup_done)
+               smp_ops->bringup_done();
+
        dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
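smp_cpus_done() now lets the platform run one-time work after all secondaries are online, via the optional bringup_done hook. A hedged sketch of how a platform might wire it up (every name except bringup_done is invented):

/* Hypothetical callback: runs once after all secondary CPUs are up */
static void example_bringup_done(void)
{
	/*
	 * e.g. release bootstrap resources that were only needed while
	 * kicking the secondary CPUs.
	 */
}

static struct smp_ops_t example_smp_ops = {
	.bringup_done	= example_bringup_done,
	/* .probe, .kick_cpu, .setup_cpu, ... as usual for the platform */
};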
@@ -660,5 +637,9 @@ void cpu_die(void)
 {
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();
+
+       /* If we return, we re-enter start_secondary */
+       start_secondary_resume();
 }
+
 #endif
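The offline/online round trip can be read off the hunks above: generic_mach_cpu_die() parks the dying CPU until its cpu_state is flipped back to CPU_UP_PREPARE, and rather than re-onlining itself it simply returns, so cpu_die() restarts the CPU through start_secondary_resume(). A minimal sketch of a machine hook relying on this behaviour (the function name is made up):

/*
 * Hypothetical ppc_md.cpu_die implementation that just delegates to the
 * generic helper.  The dying CPU spins in generic_mach_cpu_die() until it
 * is asked to come back; it then returns, and cpu_die() above re-enters
 * start_secondary() via start_secondary_resume() instead of resuming the
 * old idle loop.
 */
static void example_md_cpu_die(void)
{
	generic_mach_cpu_die();
}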