[PATCH] sep initializing rework
author	Li Shaohua <shaohua.li@intel.com>
	Sat, 25 Jun 2005 21:54:53 +0000 (14:54 -0700)
committer	Linus Torvalds <torvalds@ppc970.osdl.org>
	Sat, 25 Jun 2005 23:24:29 +0000 (16:24 -0700)
Make SEP (SYSENTER) init per-cpu, so it is hotplug safe: sysenter_setup()
now runs exactly once on the boot CPU to build the vsyscall page, and
enable_sep_cpu() programs the SYSENTER MSRs on each CPU as it is brought
up, instead of via a one-shot on_each_cpu() initcall that never reached
CPUs hot-added after boot.
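
A condensed sketch of the resulting flow (paraphrased from the hunks
below; the call sites are summarized, not quoted verbatim):

	before:  __initcall(sysenter_setup)
	           -> on_each_cpu(enable_sep_cpu, ...)  /* boot-time CPUs only */

	after:   identify_cpu(c)            /* runs on every CPU brought up */
	           -> sysenter_setup()      /* boot CPU only: vsyscall page */
	           -> enable_sep_cpu()      /* every CPU: SYSENTER MSRs */
	         __restore_processor_state()
	           -> enable_sep_cpu()      /* resume: re-program the MSRs */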

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/kernel/cpu/common.c
arch/i386/kernel/smp.c
arch/i386/kernel/smpboot.c
arch/i386/kernel/sysenter.c
arch/i386/power/cpu.c
include/asm-i386/processor.h
include/asm-i386/smp.h

diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index b9954248d0aaf13f8861a87c7b55e1ab9314e8a6..d58e169fbdbbe94abf813fd35222f50b9c56492d 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -432,6 +432,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_MCE
        mcheck_init(c);
 #endif
+       if (c == &boot_cpu_data)
+               sysenter_setup();
+       enable_sep_cpu();
 }
 
 #ifdef CONFIG_X86_HT
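
What makes this hotplug safe is the placement: identify_cpu() runs for
the boot CPU and again for each secondary CPU as it comes up.  The
bring-up chain below is an assumption paraphrased from the 2.6-era i386
tree, not part of this patch:

	/* assumed path for a secondary (or hot-added) CPU:
	 *   start_secondary() -> smp_callin() -> smp_store_cpu_info(cpu)
	 *     -> identify_cpu(c) -> enable_sep_cpu()
	 */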
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 35f521612b2093e72a1b63b499f525f3f629541d..cec4bde67161254106267d6c6e4715aaeea93a3a 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -495,6 +495,16 @@ struct call_data_struct {
        int wait;
 };
 
+void lock_ipi_call_lock(void)
+{
+       spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+       spin_unlock_irq(&call_lock);
+}
+
 static struct call_data_struct * call_data;
 
 /*
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index ad74a46e9ef005aabf5cbe3ecbb1f0b4e5859f9b..c5517f3323090fceac4c8197204ccf569d34276c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -449,7 +449,18 @@ static void __init start_secondary(void *unused)
         * the local TLBs too.
         */
        local_flush_tlb();
+
+       /*
+        * We need to hold call_lock so that there is no window
+        * between the time smp_call_function() determines the number
+        * of IPI recipients and the time it decides which CPUs will
+        * receive the IPI.  Holding the lock keeps this CPU from
+        * being counted in an smp_call_function() that is already
+        * in progress.
+        */
+       lock_ipi_call_lock();
        cpu_set(smp_processor_id(), cpu_online_map);
+       unlock_ipi_call_lock();
 
        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
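
For illustration, the interleaving the lock rules out, paraphrasing the
count-then-send structure of smp_call_function() (schematic, not
verbatim kernel code):

	/*
	 *   CPU A: smp_call_function()      CPU B: start_secondary()
	 *   ------------------------------  ------------------------------
	 *   takes call_lock
	 *   num = num_online_cpus() - 1;
	 *                                   cpu_set(B, cpu_online_map);
	 *   send_IPI_allbutself(...);    <- B now receives an IPI it was
	 *   wait for num acks;              never counted for
	 *
	 * With lock_ipi_call_lock() around cpu_set(), CPU B goes online
	 * either entirely before or entirely after any given
	 * smp_call_function().
	 */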
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 960d8bd137d0ce86b6b76ac22c3930bb2833711d..0bada1870bdf5691631e10558bf43cbb350691a1 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -21,11 +21,16 @@
 
 extern asmlinkage void sysenter_entry(void);
 
-void enable_sep_cpu(void *info)
+void enable_sep_cpu(void)
 {
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
+       if (!boot_cpu_has(X86_FEATURE_SEP)) {
+               put_cpu();
+               return;
+       }
+
        tss->ss1 = __KERNEL_CS;
        tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
@@ -41,7 +46,7 @@ void enable_sep_cpu(void *info)
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
 
-static int __init sysenter_setup(void)
+int __init sysenter_setup(void)
 {
        void *page = (void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -58,8 +63,5 @@ static int __init sysenter_setup(void)
               &vsyscall_sysenter_start,
               &vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
-       on_each_cpu(enable_sep_cpu, NULL, 1, 1);
        return 0;
 }
-
-__initcall(sysenter_setup);
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 6f521cf19a133924b026b13bb3695218fd2a2e6b..d099d01461f45a2175235894b8dba7bb7cc19df1 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -22,9 +22,11 @@
 #include <linux/device.h>
 #include <linux/suspend.h>
 #include <linux/acpi.h>
+
 #include <asm/uaccess.h>
 #include <asm/acpi.h>
 #include <asm/tlbflush.h>
+#include <asm/processor.h>
 
 static struct saved_context saved_context;
 
@@ -33,8 +35,6 @@ unsigned long saved_context_esp, saved_context_ebp;
 unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 
-extern void enable_sep_cpu(void *);
-
 void __save_processor_state(struct saved_context *ctxt)
 {
        kernel_fpu_begin();
@@ -136,7 +136,7 @@ void __restore_processor_state(struct saved_context *ctxt)
         * sysenter MSRs
         */
        if (boot_cpu_has(X86_FEATURE_SEP))
-               enable_sep_cpu(NULL);
+               enable_sep_cpu();
 
        fix_processor_context();
        do_fpu_end();
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index c76c50e9622533caaeaf44f5e9ba35b9ff972a19..6f0f93d0d41741ea7f094a3975f7cae5843aa44f 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -691,5 +691,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
 
 #endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 507f2fd39a6a00d55c268439680b118935019e5b..2451ead0ca5c92d134f80cfb1f63fd161a47a758 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -42,6 +42,8 @@ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_invalidate_rcv(void);          /* Process an NMI */
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
 
 #define MAX_APICID 256
 extern u8 x86_cpu_to_apicid[];