#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
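
/*
 * A plain-C sketch of the ticket algorithm implemented below, ignoring
 * atomicity and barriers (illustration only, not compiled):
 *
 *	lock:	my_ticket = lock.next++;	// take a ticket, atomically
 *		while (lock.owner != my_ticket)
 *			wait;			// park in wfe()
 *	unlock:	lock.owner++;			// serve the next ticket
 *		sev;				// wake the waiters
 *
 * Assuming the usual little-endian layout from spinlock_types.h, "owner"
 * sits in the low halfword and "next" in the high halfword, which is why
 * the asm takes a ticket by adding (1 << TICKET_SHIFT) to the lock word.
 */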

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}
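
/*
 * Note on the asm above: ldrex/strex implement the atomic ticket grab.
 * ldrex marks the lock word for exclusive access, strex succeeds (writes
 * 0 to %2) only if no other CPU touched the word in between, and the bne
 * retries otherwise.  Waiters then spin in wfe() until the owner field
 * catches up with the ticket captured in lockval.
 */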

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 slock;

	__asm__ __volatile__(
"	ldrex	%0, [%2]\n"
"	subs	%1, %0, %0, ror #16\n"
"	addeq	%0, %0, %3\n"
"	strexeq	%1, %0, [%2]"
	: "=&r" (slock), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
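
/*
 * Worked example: with owner == 5 and next == 7, ticket 5 holds the lock
 * and ticket 6 is queued, so next - owner == 2 and the lock counts as
 * contended; next - owner == 1 means held but with no waiters.
 */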

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
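
/*
 * Sketch of the rwlock word layout used above and below (illustration
 * only): 0 means unlocked, a positive value N means N readers hold the
 * lock, and 0x80000000 (bit 31 set, i.e. negative when read as signed)
 * means one writer holds it exclusively.
 */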

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
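
/*
 * Hypothetical usage via the generic wrappers (defined elsewhere, not in
 * this file), assuming the usual <linux/spinlock.h> and <linux/rwlock.h>
 * APIs; my_lock and my_rwlock are example names:
 *
 *	DEFINE_SPINLOCK(my_lock);
 *	DEFINE_RWLOCK(my_rwlock);
 *
 *	spin_lock(&my_lock);	 ...critical section...	 spin_unlock(&my_lock);
 *	read_lock(&my_rwlock);	 ...readers may nest...	 read_unlock(&my_rwlock);
 *	write_lock(&my_rwlock);	 ...exclusive access...	 write_unlock(&my_rwlock);
 */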

#endif /* __ASM_SPINLOCK_H */