x86/asm/entry: Remove INIT_TSS and fold the definitions into 'cpu_tss'
author	Andy Lutomirski <luto@amacapital.net>
Fri, 6 Mar 2015 03:19:06 +0000 (19:19 -0800)
committer	Ingo Molnar <mingo@kernel.org>
Fri, 6 Mar 2015 07:32:58 +0000 (08:32 +0100)
The INIT_TSS macro is unnecessary.  Just define the initial TSS where
'cpu_tss' is defined.

While we're at it, merge the 32-bit and 64-bit definitions.  The
only syntactic change is that 32-bit kernels were computing sp0
as long, but now they compute it as unsigned long.
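For illustration only, here is a minimal user-space sketch (with a
stand-in init_stack array, not the kernel symbol) showing that the old
32-bit expression and the merged expression compute the same stack-top
address; only the signedness of the intermediate cast differs:

	#include <stdio.h>

	/* Stand-in for the kernel's init_stack; the real one is THREAD_SIZE bytes. */
	static char init_stack[8192];

	int main(void)
	{
		/* Old 32-bit INIT_TSS expression: intermediate value is (long). */
		unsigned long old_sp0 = sizeof(init_stack) + (long)&init_stack;

		/* Merged cpu_tss expression: intermediate value is (unsigned long). */
		unsigned long new_sp0 = (unsigned long)&init_stack + sizeof(init_stack);

		/* Both point one past the end of init_stack (the initial sp0). */
		printf("%s\n", old_sp0 == new_sp0 ? "equal" : "different");
		return 0;
	}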

Verified by objdump: the contents and relocations of
.data..percpu..shared_aligned are unchanged on 32-bit and 64-bit
kernels.
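
The exact commands used are not stated in the commit; one way such a
check could be done with standard GNU objdump options (an illustrative
assumption, not the author's recorded procedure) is:

	# Dump the raw contents of the per-CPU section from a built kernel:
	objdump -s -j .data..percpu..shared_aligned vmlinux

	# Dump relocations against that section in the affected object file:
	objdump -r -j .data..percpu..shared_aligned arch/x86/kernel/process.o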

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/8fc39fa3f6c5d635e93afbdd1a0fe0678a6d7913.1425611534.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/processor.h
arch/x86/kernel/process.c

index 117ee65473e245771b0d87cece1b6477a00fdb89..f5e3ec63767d8b4575586c7f535d1a2d1fef9cc6 100644 (file)
@@ -818,22 +818,6 @@ static inline void spin_lock_prefetch(const void *x)
        .io_bitmap_ptr          = NULL,                                   \
 }
 
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS  {                                                      \
-       .x86_tss = {                                                      \
-               .sp0            = sizeof(init_stack) + (long)&init_stack, \
-               .ss0            = __KERNEL_DS,                            \
-               .ss1            = __KERNEL_CS,                            \
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
-        },                                                               \
-       .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },       \
-}
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
@@ -892,10 +876,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define INIT_TSS  { \
-       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
index 6f608734923174300776ff9f9f26298d806e0305..f4c0af7fc3a0213e424a6d17d0c7ab6af9ca2f3c 100644 (file)
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+       .x86_tss = {
+               .sp0 = (unsigned long)&init_stack + sizeof(init_stack),
+#ifdef CONFIG_X86_32
+               .ss0 = __KERNEL_DS,
+               .ss1 = __KERNEL_CS,
+               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+#endif
+        },
+#ifdef CONFIG_X86_32
+        /*
+         * Note that the .io_bitmap member must be extra-big. This is because
+         * the CPU will access an additional byte beyond the end of the IO
+         * permission bitmap. The extra byte must be all 1 bits, and must
+         * be within the limit.
+         */
+       .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
+#endif
+};
 EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
 
 #ifdef CONFIG_X86_64