arm64: avoid R_AARCH64_ABS64 relocations for Image header fields
[firefly-linux-kernel-4.4.55.git] arch/arm64/kernel/head.S
index 23cfc08fc8ba88683600d7618acb9bdb400ae2b9..f076debf392dfe3076446ea58d58117b547047a2 100644
@@ -83,9 +83,9 @@ efi_head:
        b       stext                           // branch to kernel start, magic
        .long   0                               // reserved
 #endif
-       .quad   _kernel_offset_le               // Image load offset from start of RAM, little-endian
-       .quad   _kernel_size_le                 // Effective size of kernel image, little-endian
-       .quad   _kernel_flags_le                // Informative flags, little-endian
+       le64sym _kernel_offset_le               // Image load offset from start of RAM, little-endian
+       le64sym _kernel_size_le                 // Effective size of kernel image, little-endian
+       le64sym _kernel_flags_le                // Informative flags, little-endian
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
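
Why this hunk matters: with .quad _kernel_size_le, the assembler emits an R_AARCH64_ABS64 relocation against the symbol, and once the kernel is linked as a position-independent binary such absolute relocations are only resolved at runtime, after the bootloader has already parsed the Image header. The le64sym macro (its definition sits outside this hunk) is expected to emit each field as two .long halves, lo32/hi32 symbols whose values are fixed at build time. A minimal C sketch of that build-time split, with illustrative helper names rather than the kernel's own:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * On a big-endian build the 32-bit halves must be byte-swapped at
     * build time; on a little-endian build this is the identity.
     */
    #ifdef __AARCH64EB__
    #define DATA_LE32(data)                         \
            (((data) & 0x000000ffU) << 24 |         \
             ((data) & 0x0000ff00U) << 8  |         \
             ((data) & 0x00ff0000U) >> 8  |         \
             ((data) & 0xff000000U) >> 24)
    #else
    #define DATA_LE32(data) ((uint32_t)(data))
    #endif

    /* Split a 64-bit header field into two .long-sized constants. */
    #define LE64_LO32(v)    DATA_LE32((uint32_t)((v) & 0xffffffffU))
    #define LE64_HI32(v)    DATA_LE32((uint32_t)((v) >> 32))

    int main(void)
    {
            uint64_t offset = 0x80000;      /* hypothetical load offset */

            printf("lo32=0x%08x hi32=0x%08x\n",
                   LE64_LO32(offset), LE64_HI32(offset));
            return 0;
    }
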
@@ -389,7 +389,7 @@ __create_page_tables:
         * Map the kernel image (starting with PHYS_OFFSET).
         */
        mov     x0, x26                         // swapper_pg_dir
-       mov     x5, #PAGE_OFFSET
+       ldr     x5, =KIMAGE_VADDR
        create_pgd_entry x0, x5, x3, x6
        ldr     x6, =KERNEL_END                 // __va(KERNEL_END)
        mov     x3, x24                         // phys offset
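
This page-table hunk decouples the kernel image's virtual base from the linear map: instead of mapping the image at PAGE_OFFSET, which tracks the start of RAM, it is now mapped at KIMAGE_VADDR, a link-time constant. Note the switch from mov to a literal-pool ldr: KIMAGE_VADDR is not guaranteed to be encodable as a mov immediate. A C sketch of the resulting address arithmetic, using hypothetical layout constants (real values depend on VA_BITS and configuration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_OFFSET   0xffffffc000000000ULL  /* start of the linear map */
    #define KIMAGE_VADDR  0xffffff8008000000ULL  /* link address of the image */
    #define PHYS_OFFSET   0x0000000080000000ULL  /* start of RAM (x24) */

    /* Linear-map alias: moves with the start of RAM. */
    static uint64_t linear_va(uint64_t phys)
    {
            return phys - PHYS_OFFSET + PAGE_OFFSET;
    }

    /* Image alias: anchored to the link-time constant KIMAGE_VADDR. */
    static uint64_t image_va(uint64_t phys)
    {
            return phys - PHYS_OFFSET + KIMAGE_VADDR;
    }

    int main(void)
    {
            uint64_t text_phys = PHYS_OFFSET + 0x80000;

            printf("linear alias 0x%llx, image alias 0x%llx\n",
                   (unsigned long long)linear_va(text_phys),
                   (unsigned long long)image_va(text_phys));
            return 0;
    }
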
@@ -415,17 +415,24 @@ ENDPROC(__create_page_tables)
  */
        .set    initial_sp, init_thread_union + THREAD_START_SP
 __mmap_switched:
-       adr_l   x6, __bss_start
-       adr_l   x7, __bss_stop
-
-1:     cmp     x6, x7
-       b.hs    2f
-       str     xzr, [x6], #8                   // Clear BSS
-       b       1b
-2:
+       // Clear BSS
+       adr_l   x0, __bss_start
+       mov     x1, xzr
+       adr_l   x2, __bss_stop
+       sub     x2, x2, x0
+       bl      __pi_memset
+       dsb     ishst                           // Make zero page visible to PTW
+
        adr_l   sp, initial_sp, x4
+       mov     x4, sp
+       and     x4, x4, #~(THREAD_SIZE - 1)
+       msr     sp_el0, x4                      // Save thread_info
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
-       str_l   x24, memstart_addr, x6          // Save PHYS_OFFSET
+
+       ldr     x4, =KIMAGE_VADDR               // Save the offset between
+       sub     x4, x4, x24                     // the kernel virtual and
+       str_l   x4, kimage_voffset, x5          // physical mappings
+
        mov     x29, #0
 #ifdef CONFIG_KASAN
        bl      kasan_early_init
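
Three independent changes land in __mmap_switched. First, the hand-rolled BSS-clearing loop becomes a call to memset through its position-independent __pi_ alias, followed by dsb ishst so the freshly zeroed region (which now also hosts the empty zero page) is guaranteed visible to the page table walker. Second, the thread_info pointer is derived by masking the stack pointer down to a THREAD_SIZE boundary and cached in sp_el0, making current_thread_info() a plain system-register read. Third, instead of storing PHYS_OFFSET directly into memstart_addr, the code records kimage_voffset, the offset between the image's virtual and physical addresses. A C sketch of both computations, with hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE  16384ULL  /* 16 KiB arm64 kernel stacks */

    /* What 'and x4, x4, #~(THREAD_SIZE - 1)' computes: thread_info sits
     * at the base of the THREAD_SIZE-aligned stack, so masking the stack
     * pointer recovers it. */
    static uint64_t thread_info_of(uint64_t sp)
    {
            return sp & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
            uint64_t kimage_vaddr = 0xffffff8008000000ULL;  /* KIMAGE_VADDR */
            uint64_t phys_offset  = 0x0000000080000000ULL;  /* x24 */
            uint64_t sp           = 0xffffff8008f53e10ULL;  /* stack pointer */

            /* What the new code stores in kimage_voffset: virt - phys. */
            uint64_t kimage_voffset = kimage_vaddr - phys_offset;

            printf("kimage_voffset = 0x%llx\n",
                   (unsigned long long)kimage_voffset);
            printf("thread_info    = 0x%llx\n",
                   (unsigned long long)thread_info_of(sp));
            return 0;
    }
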
@@ -512,9 +519,14 @@ CPU_LE(    movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
 #endif
 
        /* EL2 debug */
+       mrs     x0, id_aa64dfr0_el1             // Check ID_AA64DFR0_EL1 PMUVer
+       sbfx    x0, x0, #8, #4
+       cmp     x0, #1
+       b.lt    4f                              // Skip if no PMU present
        mrs     x0, pmcr_el0                    // Disable debug access traps
        ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
        msr     mdcr_el2, x0                    // all PMU counters from EL1
+4:
 
        /* Stage-2 translation */
        msr     vttbr_el2, xzr
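
The EL2 debug hunk guards the pmcr_el0 access: the Performance Monitors extension is optional, and on CPUs without an architected PMU its system registers are not guaranteed to exist, so the code now checks ID_AA64DFR0_EL1.PMUVer (bits [11:8]) before touching them. The extract is deliberately signed. A C model of the check, assuming arithmetic right shift of signed values as GCC/Clang provide:

    #include <stdint.h>
    #include <stdio.h>

    /* C model of 'sbfx x0, x0, #8, #4': sign-extract PMUVer. */
    static int64_t pmuver(uint64_t id_aa64dfr0)
    {
            return (int64_t)(id_aa64dfr0 << 52) >> 60;
    }

    int main(void)
    {
            /* 0x0: no PMU; 0x1: PMUv3; 0xf: IMPLEMENTATION DEFINED. The
             * signed extract turns 0xf into -1, so 'cmp x0, #1; b.lt 4f'
             * skips both the no-PMU and the non-architected-PMU cases. */
            printf("%lld %lld %lld\n",
                   (long long)pmuver(0x0ULL << 8),
                   (long long)pmuver(0x1ULL << 8),
                   (long long)pmuver(0xfULL << 8));
            return 0;
    }
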
@@ -606,6 +618,8 @@ ENDPROC(secondary_startup)
 ENTRY(__secondary_switched)
        ldr     x0, [x21]                       // get secondary_data.stack
        mov     sp, x0
+       and     x0, x0, #~(THREAD_SIZE - 1)
+       msr     sp_el0, x0                      // save thread_info
        mov     x29, #0
        b       secondary_start_kernel
 ENDPROC(__secondary_switched)
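
Secondary CPUs get the same treatment as the boot CPU in the __mmap_switched hunk above: the stack pointer handed over in secondary_data.stack is masked down to its THREAD_SIZE base and cached in sp_el0, so current_thread_info() works on each secondary before it ever context-switches.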