arm64: don't map TEXT_OFFSET bytes below the kernel if we can avoid it
author Ard Biesheuvel <ard.biesheuvel@linaro.org>
Mon, 18 Apr 2016 15:09:46 +0000 (17:09 +0200)
committer Ard Biesheuvel <ard.biesheuvel@linaro.org>
Fri, 29 Jul 2016 16:59:49 +0000 (18:59 +0200)
For historical reasons, the kernel Image must be loaded into physical
memory at a 512 KB offset above a 2 MB aligned base address. The region
between the base address and the start of the kernel Image has no
significance to the kernel itself, but it is currently mapped explicitly
into the early kernel VMA range for all translation granules.
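
Purely as an illustration (not part of the patch), the placement rule boils
down to the following arithmetic; the helper name and the example base
address are made up, and TEXT_OFFSET is assumed to be the historical
default of 0x80000:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_2M        0x200000ULL
    #define TEXT_OFFSET  0x80000ULL   /* 512 KB, the historical default */

    /* hypothetical helper: the loader picks a 2 MB aligned base and
     * places the Image TEXT_OFFSET bytes above it */
    static uint64_t image_load_address(uint64_t dram_base)
    {
        uint64_t base = (dram_base + SZ_2M - 1) & ~(SZ_2M - 1);
        return base + TEXT_OFFSET;
    }

    int main(void)
    {
        /* e.g. DRAM at 0x40000000 -> Image expected at 0x40080000 */
        printf("0x%llx\n", (unsigned long long)image_load_address(0x40000000ULL));
        return 0;
    }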

In some cases (i.e., with a 4 KB granule), this is unavoidable due to the
2 MB granularity of the early kernel mappings. However, in other cases,
e.g., when running with larger page sizes, or in the future, with more
granular KASLR, there is no reason to map it explicitly like we do currently.
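
As a rough sketch of why the granule matters (assuming 2 MB swapper blocks
with 4 KB pages and page-sized blocks with larger granules; the helper and
the numbers are examples only):

    #include <stdint.h>
    #include <stdio.h>

    #define TEXT_OFFSET  0x80000ULL   /* 512 KB */

    /* where the early mapping starts once _text is rounded down to a block */
    static uint64_t map_start(uint64_t text, uint64_t block_size)
    {
        return text & ~(block_size - 1);
    }

    int main(void)
    {
        uint64_t text = 0x40000000ULL + TEXT_OFFSET;

        /* 4 KB granule, 2 MB blocks: rounding pulls in the 512 KB below _text */
        printf("4K : map from 0x%llx\n", (unsigned long long)map_start(text, 0x200000ULL));
        /* 64 KB granule, 64 KB blocks: nothing below _text needs to be mapped */
        printf("64K: map from 0x%llx\n", (unsigned long long)map_start(text, 0x10000ULL));
        return 0;
    }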

So update the logic so that the region is mapped only if that happens as
a side effect of rounding the start address of the kernel down to the
swapper block size, and leave it unmapped otherwise.
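
Spelled out with the symbols used in the head.S hunk below (the KASLR
displacement is the value held in x23), the bounds passed to
create_block_map become:

    map start = __va(_text) = KIMAGE_VADDR + TEXT_OFFSET + KASLR displacement
    map end   = map start + (_end - _text) = __va(_end)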

Since the symbol kernel_img_size now simply resolves to the memory
footprint of the kernel Image, we can drop its definition from image.h
and open-code its calculation.
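
For reference, the size expression goes from the image.h definition to the
open-coded runtime form computed by the adrp/sub sequence in the hunk below:

    before: kernel_img_size = _end - (_text - TEXT_OFFSET)   (footprint plus the TEXT_OFFSET pad)
    after:  _end - _text                                      (the Image footprint only)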

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
(cherry picked from commit 18b9c0d641938242d8bcdba3c14a8f2beec2a97e)
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
arch/arm64/kernel/head.S
arch/arm64/kernel/image.h

arch/arm64/kernel/head.S
index 86ff6950b7bd3c2f212899a4eb807baaab515418..43484433b3ce6b8655addad0e6651c499a637d2b 100644
@@ -391,12 +391,13 @@ __create_page_tables:
         * Map the kernel image (starting with PHYS_OFFSET).
         */
        mov     x0, x26                         // swapper_pg_dir
-       mov_q   x5, KIMAGE_VADDR
+       mov_q   x5, KIMAGE_VADDR + TEXT_OFFSET  // compile time __va(_text)
        add     x5, x5, x23                     // add KASLR displacement
        create_pgd_entry x0, x5, x3, x6
-       ldr     w6, =kernel_img_size
-       add     x6, x6, x5
-       mov     x3, x24                         // phys offset
+       adrp    x6, _end                        // runtime __pa(_end)
+       adrp    x3, _text                       // runtime __pa(_text)
+       sub     x6, x6, x3                      // _end - _text
+       add     x6, x6, x5                      // runtime __va(_end)
        create_block_map x0, x7, x3, x5, x6
 
        /*
arch/arm64/kernel/image.h
index 5ff892f40a0a8faeaa11f54e0796a97b631c219b..f0be31f1dd4515a2195c153556afae1b2c002d06 100644
@@ -71,8 +71,6 @@
        DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET);      \
        DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
 
-kernel_img_size = _end - (_text - TEXT_OFFSET);
-
 #ifdef CONFIG_EFI
 
 __efistub_stext_offset = stext - _text;