/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Stack pushing/popping (register pairs only). Equivalent to store decrement
 * before, load increment after.
 */
	.macro	push, xreg1, xreg2
	stp	\xreg1, \xreg2, [sp, #-16]!
	.endm

	.macro	pop, xreg1, xreg2
	ldp	\xreg1, \xreg2, [sp], #16
	.endm

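/*
 * For illustration (not part of the original header): callers pair these
 * to save and restore a register pair across a call, popping in the
 * opposite order of the pushes:
 *
 *	push	x19, x20		// sp -= 16; save pair
 *	bl	some_func		// some_func is hypothetical
 *	pop	x19, x20		// restore pair; sp += 16
 */
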
/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1			// clear MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1			// set MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm

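/*
 * For illustration (not part of the original header): the single write
 * above stands in for the equivalent two-step sequence
 *
 *	enable_dbg			// msr daifclr, #8
 *	enable_irq			// msr daifclr, #2
 */
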
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

#define USER(l, x...)				\
9999:	x;					\
	.section __ex_table,"a";		\
	.align	3;				\
	.quad	9999b,l;			\
	.previous

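/*
 * For illustration (not part of the original header; the surrounding
 * labels are hypothetical): tagging a user-space access so that a fault
 * at the marked instruction resumes at the fixup label:
 *
 * USER(9f, strb	wzr, [x0])	// may fault on a bad user pointer
 *	...
 * 9:	mov	x0, x2			// fault fixup runs here
 */
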
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

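/*
 * For illustration (not part of the original header): string routines
 * use these to emit byte-reversal only for the relevant byte order, e.g.
 *
 * CPU_BE(	rev	w1, w1	)	// assembled only on big-endian
 */
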
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

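/*
 * For illustration (not part of the original header): a compat syscall
 * path holding the two halves of a 64-bit argument in x0/x1 can write
 *
 *	regs_to_64	x2, x0, x1
 *
 * and the endianness-dependent parameter order above decides which of
 * x0/x1 supplies the low and which the high 32 bits of x2.
 */
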
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional scratch register to be used if <dst> == sp, which
	 *       is not allowed in an adrp instruction
	 */
	.macro	adr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.else
	adrp	\tmp, \sym
	add	\dst, \tmp, :lo12:\sym
	.endif
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm

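/*
 * For illustration (not part of the original header; the symbol names
 * are hypothetical):
 *
 *	adr_l	x0, init_data		// x0 = &init_data
 *	ldr_l	w1, some_flag, x2	// 32-bit dst, so x2 holds the page
 *	str_l	x3, saved_val, x4	// x3 preserved, x4 is scratch
 */
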
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
	.macro	dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

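/*
 * Worked example (illustrative, not part of the original header):
 * CTR_EL0.DminLine (bits [19:16]) encodes log2(words per line), so
 * DminLine == 4 yields 4 bytes << 4 = 64-byte D-cache lines.
 */
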
/*
 * icache_line_size - get the minimum I-cache line size from the CTR register.
 */
	.macro	icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

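/*
 * For illustration (not part of the original header): CPU setup code
 * typically patches its TCR_EL1 value before installing it:
 *
 *	tcr_set_idmap_t0sz	x10, x9
 *	msr	tcr_el1, x10
 */
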
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size		// size becomes the end address
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2		// align kaddr down to a line
9998:	dc	\op, \kaddr
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

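/*
 * For illustration (not part of the original header): a flush routine
 * with the base address in x0 and the size in x1 can clean+invalidate
 * to the point of coherency with
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 *
 * bearing in mind that all four register operands are corrupted.
 */
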
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

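/*
 * For illustration (not part of the original header): ending a routine
 * with ENDPIPROC instead of ENDPROC also emits a __pi_-prefixed alias
 * that early boot code can call before the MMU is up:
 *
 *	ENTRY(memcpy)
 *		...
 *	ENDPIPROC(memcpy)		// defines memcpy and __pi_memcpy
 */
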
#endif	/* __ASM_ASSEMBLER_H */