Merge tag 'v4.4.41' into linux-linaro-lsk-v4.4
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 9ea846ded55cc6aee3532e392445c539245fe29f..290e13428f4a1c6b1b9425938a80dcd69460e592 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -1,5 +1,5 @@
 /*
- * Based on arch/arm/include/asm/assembler.h
+ * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
  *
  * Copyright (C) 1996-2000 Russell King
  * Copyright (C) 2012 ARM Ltd.
@@ -23,6 +23,9 @@
 #ifndef __ASM_ASSEMBLER_H
 #define __ASM_ASSEMBLER_H
 
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -211,6 +214,102 @@ lr        .req    x30             // link register
        add     \reg, \reg, \tmp
        .endm
 
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+       .macro  vma_vm_mm, rd, rn
+       ldr     \rd, [\rn, #VMA_VM_MM]
+       .endm
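+
+/*
+ * Illustrative use only (register choice hypothetical): with a
+ * struct vm_area_struct pointer in x1,
+ *
+ *	vma_vm_mm x0, x1		// x0 = vma->vm_mm
+ *
+ * leaves the owning mm_struct pointer in x0.
+ */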
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+       .macro  mmid, rd, rn
+       ldr     \rd, [\rn, #MM_CONTEXT_ID]
+       .endm
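+
+/*
+ * Illustrative use only (register choice hypothetical): given an
+ * mm_struct pointer in x0,
+ *
+ *	mmid	x1, x0			// x1 = mm->context.id
+ *
+ * returns the context id used for ASID handling.
+ */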
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+       .macro  dcache_line_size, reg, tmp
+       mrs     \tmp, ctr_el0                   // read CTR
+       ubfm    \tmp, \tmp, #16, #19            // cache line size encoding
+       mov     \reg, #4                        // bytes per word
+       lsl     \reg, \reg, \tmp                // actual cache line size
+       .endm
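+
+/*
+ * CTR_EL0.DminLine (bits [19:16]) encodes log2 of the number of words in
+ * the smallest D-cache line, so the mov/lsl pair above yields
+ * 4 << DminLine bytes.  Illustrative use (registers hypothetical):
+ *
+ *	dcache_line_size x2, x3		// x2 = line size in bytes, x3 clobbered
+ */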
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+       .macro  icache_line_size, reg, tmp
+       mrs     \tmp, ctr_el0                   // read CTR
+       and     \tmp, \tmp, #0xf                // cache line size encoding
+       mov     \reg, #4                        // bytes per word
+       lsl     \reg, \reg, \tmp                // actual cache line size
+       .endm
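+
+/*
+ * Likewise, CTR_EL0.IminLine (bits [3:0]) gives log2 of the words per
+ * I-cache line, i.e. the result is 4 << IminLine bytes.  Illustrative
+ * use (registers hypothetical):
+ *
+ *	icache_line_size x2, x3		// x2 = line size in bytes, x3 clobbered
+ */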
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+       .macro  tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+       ldr_l   \tmpreg, idmap_t0sz
+       bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+       .endm
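+
+/*
+ * Illustrative use in an MMU set-up sequence (registers hypothetical):
+ * with the TCR_EL1 value being assembled in x10,
+ *
+ *	tcr_set_idmap_t0sz x10, x9	// insert idmap_t0sz into TCR.T0SZ
+ *	msr	tcr_el1, x10
+ *
+ * On kernels without CONFIG_ARM64_VA_BITS_48 this patches the T0SZ field
+ * with the boot-time idmap_t0sz value; otherwise it expands to nothing.
+ */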
+
+/*
+ * Macro to perform data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ *     op:             operation passed to dc instruction
+ *	domain:		domain used in dsb instruction
+ *     kaddr:          starting virtual address of the region
+ *     size:           size of the region
+ *     Corrupts:       kaddr, size, tmp1, tmp2
+ */
+       .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+       dcache_line_size \tmp1, \tmp2
+       add     \size, \kaddr, \size
+       sub     \tmp2, \tmp1, #1
+       bic     \kaddr, \kaddr, \tmp2
+9998:  dc      \op, \kaddr
+       add     \kaddr, \kaddr, \tmp1
+       cmp     \kaddr, \size
+       b.lo    9998b
+       dsb     \domain
+       .endm
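+
+/*
+ * Illustrative use (registers hypothetical): clean and invalidate the
+ * buffer at [x0, x0 + x1) to the point of coherency,
+ *
+ *	dcache_by_line_op civac, sy, x0, x1, x2, x3
+ *
+ * which expands to a "dc civac" loop over each line followed by
+ * "dsb sy"; x0-x3 are corrupted, as noted above.
+ */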
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+       .macro  reset_pmuserenr_el0, tmpreg
+       mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
+       sbfx    \tmpreg, \tmpreg, #8, #4
+       cmp     \tmpreg, #1                     // Skip if no PMU present
+       b.lt    9000f
+       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+9000:
+       .endm
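+
+/*
+ * Illustrative use, e.g. from a CPU init or resume path, with a scratch
+ * register (choice hypothetical):
+ *
+ *	reset_pmuserenr_el0 x0		// x0 is clobbered
+ *
+ * ID_AA64DFR0_EL1.PMUVer (bits [11:8]) is extracted as a signed field so
+ * the 0xf "not architecturally defined" value compares as negative and
+ * the write is skipped along with the no-PMU case.
+ */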
+
+/*
+ * copy_page - copy src to dest using temp registers t1-t8
+ */
+       .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
+9998:  ldp     \t1, \t2, [\src]
+       ldp     \t3, \t4, [\src, #16]
+       ldp     \t5, \t6, [\src, #32]
+       ldp     \t7, \t8, [\src, #48]
+       add     \src, \src, #64
+       stnp    \t1, \t2, [\dest]
+       stnp    \t3, \t4, [\dest, #16]
+       stnp    \t5, \t6, [\dest, #32]
+       stnp    \t7, \t8, [\dest, #48]
+       add     \dest, \dest, #64
+       tst     \src, #(PAGE_SIZE - 1)
+       b.ne    9998b
+       .endm
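+
+/*
+ * Illustrative use (registers hypothetical): with page-aligned dest/src
+ * pointers in x0/x1,
+ *
+ *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
+ *
+ * copies one page 64 bytes per iteration; the non-temporal stnp stores
+ * hint that the destination need not be allocated in the cache.  Both
+ * pointers advance by PAGE_SIZE and all temporaries are clobbered.
+ */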
+
 /*
  * Annotate a function as position independent, i.e., safe to be called before
  * the kernel virtual mapping is activated.