x86/asm/entry/64: Rename 'old_rsp' to 'rsp_scratch'
author Ingo Molnar <mingo@kernel.org>
Tue, 17 Mar 2015 13:42:59 +0000 (14:42 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 17 Mar 2015 15:01:42 +0000 (16:01 +0100)
Make clear that the usage of PER_CPU(old_rsp) is purely temporary,
by renaming it to 'rsp_scratch'.

Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Drewry <wad@chromium.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/entry_64.S
arch/x86/kernel/process_64.c
arch/x86/xen/xen-asm_64.S

index d287f785089e6d4ba3b584f22f86420d3328496e..0c91256d73dfc01a79c440eace640bcf3b40135c 100644
@@ -237,16 +237,16 @@ ENTRY(system_call)
 GLOBAL(system_call_after_swapgs)
 
        /*
-        * We use 'old_rsp' as a scratch register, hence this block must execute
+        * We use 'rsp_scratch' as a scratch register, hence this block must execute
         * atomically in the face of possible interrupt-driven task preemption,
-        * so we can enable interrupts only after we're done with using old_rsp:
+        * so we can enable interrupts only after we're done with using rsp_scratch:
         */
-       movq    %rsp,PER_CPU_VAR(old_rsp)
+       movq    %rsp,PER_CPU_VAR(rsp_scratch)
        /* kernel_stack is set so that 5 slots (iret frame) are preallocated */
        movq    PER_CPU_VAR(kernel_stack),%rsp
        ALLOC_PT_GPREGS_ON_STACK 8              /* +8: space for orig_ax */
        movq    %rcx,RIP(%rsp)
-       movq    PER_CPU_VAR(old_rsp),%rcx
+       movq    PER_CPU_VAR(rsp_scratch),%rcx
        movq    %r11,EFLAGS(%rsp)
        movq    %rcx,RSP(%rsp)
        /*
@@ -657,7 +657,7 @@ common_interrupt:
        ASM_CLAC
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
-       /* 0(%rsp): old_rsp */
+       /* 0(%rsp): rsp_scratch */
 ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
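
The hunk above is the whole life of the scratch slot: user %rsp is parked
in PER_CPU_VAR(rsp_scratch), the kernel stack is installed, and the parked
value is copied into the RSP slot of the pt_regs frame, after which the
slot is dead and interrupts may safely be enabled. A minimal userspace
model of that sequence (illustrative only; fake_pt_regs and
model_syscall_entry are made-up names, and a plain global stands in for
the per-CPU slot):

	/* Userspace sketch of the entry_64.S sequence above; not kernel code. */
	#include <stdio.h>

	struct fake_pt_regs { unsigned long rip, rsp, eflags; };

	static unsigned long rsp_scratch;	/* stands in for PER_CPU_VAR(rsp_scratch) */

	static void model_syscall_entry(unsigned long user_rsp, unsigned long user_rip,
					unsigned long user_rflags, struct fake_pt_regs *regs)
	{
		rsp_scratch  = user_rsp;	/* movq %rsp,PER_CPU_VAR(rsp_scratch)     */
		/* ... stack switch: movq PER_CPU_VAR(kernel_stack),%rsp ...            */
		regs->rip    = user_rip;	/* movq %rcx,RIP(%rsp)                    */
		regs->eflags = user_rflags;	/* movq %r11,EFLAGS(%rsp)                 */
		regs->rsp    = rsp_scratch;	/* reload into %rcx, movq %rcx,RSP(%rsp)  */
		/* rsp_scratch is dead here; it is safe to enable interrupts             */
	}

	int main(void)
	{
		struct fake_pt_regs regs;

		model_syscall_entry(0x7fffffffe000UL, 0x400000UL, 0x246UL, &regs);
		printf("saved user rsp: %#lx\n", regs.rsp);
		return 0;
	}
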
index 14df2be4711f56029f26a578cfdc29256a301637..97f5658290b7099f37571b35390e253bcd7ee836 100644
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-__visible DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
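
The __visible annotation keeps the symbol available to the assembly side:
this DEFINE_PER_CPU() in C and the PER_CPU_VAR(rsp_scratch) references in
entry_64.S name the same per-CPU storage. Loosely, per-CPU data plays the
role that thread-local storage plays in userspace; a rough, runnable
analogy using the GCC/Clang __thread extension (worker() is a made-up
name; build with -lpthread):

	/* Rough analogy only: each thread gets its own 'scratch' much as
	 * each CPU gets its own rsp_scratch in the kernel. Not kernel code. */
	#include <pthread.h>
	#include <stdio.h>

	static __thread unsigned long scratch;	/* analogous to DEFINE_PER_CPU */

	static void *worker(void *arg)
	{
		scratch = (unsigned long)arg;	/* private to this thread */
		printf("this thread sees scratch = %lu\n", scratch);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, worker, (void *)1UL);
		pthread_create(&b, NULL, worker, (void *)2UL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}
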
index 53adefda4275330a810b6d883b6ad8b58a72730c..985fc3ee0973c85f916c67cd9a40fc9c2c73d340 100644
@@ -68,11 +68,11 @@ ENTRY(xen_sysret64)
         * We're already on the usermode stack at this point, but
         * still with the kernel gs, so we can easily switch back
         */
-       movq %rsp, PER_CPU_VAR(old_rsp)
+       movq %rsp, PER_CPU_VAR(rsp_scratch)
        movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER_DS
-       pushq PER_CPU_VAR(old_rsp)
+       pushq PER_CPU_VAR(rsp_scratch)
        pushq %r11
        pushq $__USER_CS
        pushq %rcx
@@ -87,11 +87,11 @@ ENTRY(xen_sysret32)
         * We're already on the usermode stack at this point, but
         * still with the kernel gs, so we can easily switch back
         */
-       movq %rsp, PER_CPU_VAR(old_rsp)
+       movq %rsp, PER_CPU_VAR(rsp_scratch)
        movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER32_DS
-       pushq PER_CPU_VAR(old_rsp)
+       pushq PER_CPU_VAR(rsp_scratch)
        pushq %r11
        pushq $__USER32_CS
        pushq %rcx
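
Both Xen paths hand-build a return frame in the layout that iretq expects,
matching the '5 slots (iret frame)' comment in entry_64.S: SS, RSP, RFLAGS,
CS and RIP are pushed in that order, with user RIP still in %rcx and user
RFLAGS in %r11 per the syscall ABI. Laid out as a C struct, lowest address
(last push) first (the struct name is illustrative, not a kernel type; the
32-bit path pushes the __USER32_* selectors instead):

	/* The frame layout iret consumes, built top-down by the pushes above. */
	struct iret_frame {
		unsigned long rip;	/* pushq %rcx       (user RIP)    */
		unsigned long cs;	/* pushq $__USER_CS               */
		unsigned long rflags;	/* pushq %r11       (user RFLAGS) */
		unsigned long rsp;	/* pushq PER_CPU_VAR(rsp_scratch) */
		unsigned long ss;	/* pushq $__USER_DS               */
	};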