x86: Make alternative instruction pointers relative
authorAndy Lutomirski <luto@mit.edu>
Wed, 13 Jul 2011 13:24:10 +0000 (09:24 -0400)
committerH. Peter Anvin <hpa@linux.intel.com>
Wed, 13 Jul 2011 18:22:56 +0000 (11:22 -0700)
This saves a few bytes on x86-64 and means that future patches can
apply alternatives to unrelocated code.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/ff64a6b9a1a3860ca4a7b8b6dc7b4754f9491cd7.1310563276.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeature.h
arch/x86/kernel/alternative.c
arch/x86/lib/copy_page_64.S
arch/x86/lib/memmove_64.S

index 94d420b360d11cdfbbd4a39f13c19bd8d76fc7e0..4554cc6fb96afec6625814ba2fbd983eb0f7f00e 100644 (file)
@@ -17,8 +17,8 @@
 
 .macro altinstruction_entry orig alt feature orig_len alt_len
        .align 8
-       .quad \orig
-       .quad \alt
+       .long \orig - .
+       .long \alt - .
        .word \feature
        .byte \orig_len
        .byte \alt_len
index bf535f947e8c359d1ff482420ca5fca0a8e70f19..23fb6d79f2094356a5f471d321d883eb5a9c68cf 100644 (file)
@@ -43,8 +43,8 @@
 #endif
 
 struct alt_instr {
-       u8 *instr;              /* original instruction */
-       u8 *replacement;
+       s32 instr_offset;       /* original instruction */
+       s32 repl_offset;        /* offset to replacement instruction */
        u16 cpuid;              /* cpuid bit set for replacement */
        u8  instrlen;           /* length of original instruction */
        u8  replacementlen;     /* length of new instruction, <= instrlen */
@@ -84,8 +84,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
       "661:\n\t" oldinstr "\n662:\n"                                   \
       ".section .altinstructions,\"a\"\n"                              \
       _ASM_ALIGN "\n"                                                  \
-      _ASM_PTR "661b\n"                                /* label           */   \
-      _ASM_PTR "663f\n"                                /* new instruction */   \
+      "         .long 661b - .\n"                      /* label           */   \
+      "         .long 663f - .\n"                      /* new instruction */   \
       "         .word " __stringify(feature) "\n"      /* feature bit     */   \
       "         .byte 662b-661b\n"                     /* sourcelen       */   \
       "         .byte 664f-663f\n"                     /* replacementlen  */   \
index 71cc3800712ca09caba33cd508e457ee017280e2..9929b35929ff5278c71f508439e6b33fb40cb546 100644 (file)
@@ -331,8 +331,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         _ASM_ALIGN "\n"
-                        _ASM_PTR "1b\n"
-                        _ASM_PTR "0\n"         /* no replacement */
+                        " .long 1b - .\n"
+                        " .long 0\n"           /* no replacement */
                         " .word %P0\n"         /* feature bit */
                         " .byte 2b - 1b\n"     /* source len */
                         " .byte 0\n"           /* replacement len */
@@ -349,8 +349,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                             "2:\n"
                             ".section .altinstructions,\"a\"\n"
                             _ASM_ALIGN "\n"
-                            _ASM_PTR "1b\n"
-                            _ASM_PTR "3f\n"
+                            " .long 1b - .\n"
+                            " .long 3f - .\n"
                             " .word %P1\n"             /* feature bit */
                             " .byte 2b - 1b\n"         /* source len */
                             " .byte 4f - 3f\n"         /* replacement len */
index a81f2d52f869d842d9301d5f9aea5c23a74369a2..ddb207bb5f9128a0f823adb47820bef631828cfe 100644 (file)
@@ -263,6 +263,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
 {
        struct alt_instr *a;
+       u8 *instr, *replacement;
        u8 insnbuf[MAX_PATCH_LEN];
 
        DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
@@ -276,25 +277,29 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
         * order.
         */
        for (a = start; a < end; a++) {
-               u8 *instr = a->instr;
+               instr = (u8 *)&a->instr_offset + a->instr_offset;
+               replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
                BUG_ON(a->cpuid >= NCAPINTS*32);
                if (!boot_cpu_has(a->cpuid))
                        continue;
+
+               memcpy(insnbuf, replacement, a->replacementlen);
+
+               /* 0xe8 is a relative jump; fix the offset. */
+               if (*insnbuf == 0xe8 && a->replacementlen == 5)
+                   *(s32 *)(insnbuf + 1) += replacement - instr;
+
+               add_nops(insnbuf + a->replacementlen,
+                        a->instrlen - a->replacementlen);
+
 #ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
                        instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
-                       DPRINTK("%s: vsyscall fixup: %p => %p\n",
-                               __func__, a->instr, instr);
                }
 #endif
-               memcpy(insnbuf, a->replacement, a->replacementlen);
-               if (*insnbuf == 0xe8 && a->replacementlen == 5)
-                   *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
-               add_nops(insnbuf + a->replacementlen,
-                        a->instrlen - a->replacementlen);
                text_poke_early(instr, insnbuf, a->instrlen);
        }
 }
index 6fec2d1cebe111a5d9a5be1528e2370743088dcc..01c805ba5359153f293bb433dc6b4f6f6674e934 100644 (file)
@@ -2,6 +2,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
        ALIGN
 copy_page_c:
@@ -110,10 +111,6 @@ ENDPROC(copy_page)
 2:
        .previous
        .section .altinstructions,"a"
-       .align 8
-       .quad copy_page
-       .quad 1b
-       .word X86_FEATURE_REP_GOOD
-       .byte .Lcopy_page_end - copy_page
-       .byte 2b - 1b
+       altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD,       \
+               .Lcopy_page_end-copy_page, 2b-1b
        .previous
index d0ec9c2936d75fb6e7c908c00f2359fd0c366dd5..ee164610ec46175e31ae937a566a59f16eabdff8 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 
 #undef memmove
 
@@ -214,11 +215,9 @@ ENTRY(memmove)
        .previous
 
        .section .altinstructions,"a"
-       .align 8
-       .quad .Lmemmove_begin_forward
-       .quad .Lmemmove_begin_forward_efs
-       .word X86_FEATURE_ERMS
-       .byte .Lmemmove_end_forward-.Lmemmove_begin_forward
-       .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
+       altinstruction_entry .Lmemmove_begin_forward,           \
+               .Lmemmove_begin_forward_efs,X86_FEATURE_ERMS,   \
+               .Lmemmove_end_forward-.Lmemmove_begin_forward,  \
+               .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
        .previous
 ENDPROC(memmove)