/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

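/*
 * Each operation below exists in two forms: an out-of-line LL/SC
 * fallback (reached through __LL_SC_CALL) and an inline ARMv8.1 LSE
 * sequence, with ARM64_LSE_ATOMIC_INSN selecting between them at
 * runtime based on the CPU's capabilities.  Because the fallback is a
 * real function call, operands are pinned to the fixed registers
 * (w0/x0, x1, ...) that its calling convention expects, and x30 (the
 * link register) is clobbered by the call anyway, which is why the
 * LSE sequences use it freely as a scratch register.
 */
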
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	"	stclr	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	"	stset	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

54 static inline void atomic_xor(int i, atomic_t *v)
56 register int w0 asm ("w0") = i;
57 register atomic_t *x1 asm ("x1") = v;
59 asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
60 " steor %w[i], %[v]\n")
61 : [i] "+r" (w0), [v] "+Q" (v->counter)
static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

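/*
 * The *_return variants fetch the old value into the x30 scratch
 * register with LDADDAL (add with acquire/release semantics) and then
 * compute the new value in the operand register, so the result comes
 * back in w0/x0 exactly as it would from the out-of-line LL/SC call.
 */
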
static inline int atomic_add_return(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(add_return),
	/* LSE atomics */
	"	ldaddal	%w[i], w30, %[v]\n"
	"	add	%w[i], %w[i], w30")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return w0;
}

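/*
 * LSE has no atomic AND or SUB store forms: AND is implemented by
 * inverting the operand and using STCLR (atomic bit clear), and SUB by
 * negating the operand and using STADD.
 */
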
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub_return)
	"	nop",
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	ldaddal	%w[i], w30, %[v]\n"
	"	add	%w[i], %w[i], w30")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return w0;
}

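/*
 * CASAL compares the value at %[v] against w30 and, if they match,
 * stores %w[new], with acquire/release ordering.  The value observed
 * in memory is written back to w30 either way and becomes the return
 * value, matching cmpxchg() semantics.
 */
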
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;
	register int w1 asm ("w1") = old;
	register int w2 asm ("w2") = new;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(cmpxchg)
	"	nop",
	/* LSE atomics */
	"	mov	w30, %w[old]\n"
	"	casal	w30, %w[new], %[v]\n"
	"	mov	%w[ret], w30")
	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
	: [old] "r" (w1), [new] "r" (w2)
	: "x30", "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC

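/*
 * The 64-bit operations mirror the 32-bit ones above, operating on
 * atomic64_t and passing the operand in the full X registers.
 */
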
#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	"	stclr	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	"	stset	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	"	steor	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	"	stadd	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(add_return),
	/* LSE atomics */
	"	ldaddal	%[i], x30, %[v]\n"
	"	add	%[i], %[i], x30")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return x0;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub_return)
	"	nop",
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	ldaddal	%[i], x30, %[v]\n"
	"	add	%[i], %[i], x30")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return x0;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;
	register long x1 asm ("x1") = old;
	register long x2 asm ("x2") = new;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(cmpxchg)
	"	nop",
	/* LSE atomics */
	"	mov	x30, %[old]\n"
	"	casal	x30, %[new], %[v]\n"
	"	mov	%[ret], x30")
	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
	: [old] "r" (x1), [new] "r" (x2)
	: "x30", "cc", "memory");

	return x0;
}

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop",
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: "x30", "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

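/*
 * Generate the 1-, 2-, 4- and 8-byte cmpxchg cases plus their fully
 * ordered mb_ variants: #sz selects the CASB/CASH/CAS width, #mb adds
 * the "al" acquire/release suffix, and cl passes the extra "memory"
 * clobber needed by the ordered versions.
 */
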
#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_CMPXCHG(name)						\
	"	nop",							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#endif	/* __ASM_ATOMIC_LSE_H */