/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1b\n"
	/* Serialise against any concurrent lockers */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
"	nop\n"
"	nop\n",
	/* LSE atomics */
"	mov	%w1, %w0\n"
"	cas	%w0, %w0, %2\n"
"	eor	%w1, %w1, %w0\n")
"	cbnz	%w1, 2b\n"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	:
	: "memory");
}

#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	and	%w1, %w1, #0xffff\n"
"	eor	%w1, %w1, %w0, lsr #16\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	ldrh	%w1, %0\n"
"	add	%w1, %w1, #1\n"
"	stlrh	%w1, %0",
	/* LSE atomics */
"	mov	%w1, #1\n"
"	nop\n"
"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

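/*
 * Illustrative sketch only (not used by the kernel): a plain-C model of the
 * ticket algorithm that the asm above implements, written with compiler
 * __atomic builtins instead of the LL/SC and LSE instruction sequences.
 * The ticket_lock_model type and function names are hypothetical.
 */
struct ticket_lock_model {
	unsigned short owner;	/* ticket currently being served */
	unsigned short next;	/* next ticket to hand out */
};

static inline void ticket_lock_model_lock(struct ticket_lock_model *l)
{
	/* Atomically take the next ticket (cf. the stxr/ldadda paths above). */
	unsigned short me = __atomic_fetch_add(&l->next, 1, __ATOMIC_RELAXED);

	/* Spin until the owner field catches up with our ticket. */
	while (__atomic_load_n(&l->owner, __ATOMIC_ACQUIRE) != me)
		;
}

static inline void ticket_lock_model_unlock(struct ticket_lock_model *l)
{
	/* Hand the lock to the next waiter (cf. the stlrh/staddlh above). */
	unsigned short cur = __atomic_load_n(&l->owner, __ATOMIC_RELAXED);

	__atomic_store_n(&l->owner, (unsigned short)(cur + 1), __ATOMIC_RELEASE);
}
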
/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %1\n"
"	cbnz	%w0, 1b\n"
"	stxr	%w0, %w2, %1\n"
"	cbnz	%w0, 2b\n"
"	nop",
	/* LSE atomics */
"1:	mov	%w0, wzr\n"
"2:	casa	%w0, %w2, %1\n"
"	cbz	%w0, 3f\n"
"	ldxr	%w0, %1\n"
"	cbz	%w0, 2b\n"
"	wfe\n"
"	b	1b\n"
"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	ldaxr	%w0, %1\n"
"	cbnz	%w0, 2f\n"
"	stxr	%w0, %w2, %1\n"
"	cbnz	%w0, 1b\n"
"2:",
	/* LSE atomics */
"	mov	%w0, wzr\n"
"	casa	%w0, %w2, %1\n"
"	nop\n"
"	nop")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->lock == 0)

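/*
 * Illustrative sketch only (not part of the kernel API): a plain-C model of
 * the write-lock protocol described above, using a compiler compare-and-swap
 * in place of the stxr/casa sequences. The rwlock_model_* names are
 * hypothetical; bit 31 marks the writer, just as in rw->lock.
 */
static inline void rwlock_model_write_lock(unsigned int *lock)
{
	unsigned int expected;

	/* A writer may only enter when no readers or writers are present. */
	do {
		expected = 0;
	} while (!__atomic_compare_exchange_n(lock, &expected, 0x80000000U,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}

static inline void rwlock_model_write_unlock(unsigned int *lock)
{
	/* The writer holds the lock exclusively, so a plain release store of 0. */
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}
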
/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, #1\n"
"	tbnz	%w0, #31, 1b\n"
"	stxr	%w1, %w0, %2\n"
"	nop\n"
"	cbnz	%w1, 2b",
	/* LSE atomics */
"1:	wfe\n"
"2:	ldxr	%w0, %2\n"
"	adds	%w1, %w0, #1\n"
"	tbnz	%w1, #31, 1b\n"
"	casa	%w0, %w1, %2\n"
"	sbc	%w0, %w1, %w0\n"
"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, #1\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b",
	/* LSE atomics */
"	movn	%w0, #0\n"
"	nop\n"
"	nop\n"
"	staddl	%w0, %2")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	mov	%w1, #1\n"
"1:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, #1\n"
"	tbnz	%w0, #31, 2f\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	adds	%w1, %w0, #1\n"
"	tbnz	%w1, #31, 1f\n"
"	casa	%w0, %w1, %2\n"
"	sbc	%w1, %w1, %w0\n"
"	nop\n"
"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->lock < 0x80000000)

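/*
 * Illustrative sketch only (not part of the kernel API): a plain-C model of
 * the reader side described above. The lock word is incremented for each
 * reader, and bit 31 set (a negative value when viewed as signed) means a
 * writer is present. The rwlock_model_* names are hypothetical.
 */
static inline int rwlock_model_read_trylock(unsigned int *lock)
{
	unsigned int old = __atomic_load_n(lock, __ATOMIC_RELAXED);

	do {
		/* A writer holds the lock (bit 31 set): give up, cf. the tbnz above. */
		if (old & 0x80000000U)
			return 0;
		/* Otherwise try to add ourselves as a reader; retry on races. */
	} while (!__atomic_compare_exchange_n(lock, &old, old + 1, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));

	return 1;
}

static inline void rwlock_model_read_unlock(unsigned int *lock)
{
	/* Drop our reader count; release orders the critical section before it. */
	__atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);
}
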
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
#define smp_mb__before_spinlock()	smp_mb()

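/*
 * Illustrative usage sketch (hypothetical caller, not kernel code): a store
 * that must be visible before any access made inside the following critical
 * section needs the full barrier, because the acquire in spin_lock() alone
 * does not prevent a prior store from being reordered past later loads:
 *
 *	WRITE_ONCE(cond, 1);
 *	smp_mb__before_spinlock();
 *	spin_lock(&lock);
 *	... loads here can no longer be reordered before the store ...
 *	spin_unlock(&lock);
 */
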
#endif	/* __ASM_SPINLOCK_H */