1 #ifndef __ASM_SPINLOCK_H
2 #define __ASM_SPINLOCK_H
4 #include <asm/processor.h>
5 #include <asm/spinlock_types.h>
7 static inline int arch_spin_is_locked(arch_spinlock_t *x)
9 volatile unsigned int *a = __ldcw_align(x);
/* Plain lock: take the slow path with no saved-PSW hint (flags = 0),
 * so the spin loop never re-enables interrupts while waiting. */
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
/* Busy-wait (with a pipeline-relax hint) until the lock is observed
 * free; does not acquire it. */
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))
17 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
20 volatile unsigned int *a;
24 while (__ldcw(a) == 0)
26 if (flags & PSW_SM_I) {
35 static inline void arch_spin_unlock(arch_spinlock_t *x)
37 volatile unsigned int *a;
44 static inline int arch_spin_trylock(arch_spinlock_t *x)
46 volatile unsigned int *a;
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers.  With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
69 /* Note that we have to ensure interrupts are disabled in case we're
70 * interrupted by some other code that wants to grab the same read lock */
71 static __inline__ void arch_read_lock(arch_rwlock_t *rw)
74 local_irq_save(flags);
75 arch_spin_lock_flags(&rw->lock, flags);
77 arch_spin_unlock(&rw->lock);
78 local_irq_restore(flags);
81 /* Note that we have to ensure interrupts are disabled in case we're
82 * interrupted by some other code that wants to grab the same read lock */
83 static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
86 local_irq_save(flags);
87 arch_spin_lock_flags(&rw->lock, flags);
89 arch_spin_unlock(&rw->lock);
90 local_irq_restore(flags);
93 /* Note that we have to ensure interrupts are disabled in case we're
94 * interrupted by some other code that wants to grab the same read lock */
95 static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
99 local_irq_save(flags);
100 if (arch_spin_trylock(&rw->lock)) {
102 arch_spin_unlock(&rw->lock);
103 local_irq_restore(flags);
107 local_irq_restore(flags);
108 /* If write-locked, we fail to acquire the lock */
112 /* Wait until we have a realistic chance at the lock */
113 while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
119 /* Note that we have to ensure interrupts are disabled in case we're
120 * interrupted by some other code that wants to read_trylock() this lock */
121 static __inline__ void arch_write_lock(arch_rwlock_t *rw)
125 local_irq_save(flags);
126 arch_spin_lock_flags(&rw->lock, flags);
128 if (rw->counter != 0) {
129 arch_spin_unlock(&rw->lock);
130 local_irq_restore(flags);
132 while (rw->counter != 0)
138 rw->counter = -1; /* mark as write-locked */
140 local_irq_restore(flags);
143 static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
146 arch_spin_unlock(&rw->lock);
149 /* Note that we have to ensure interrupts are disabled in case we're
150 * interrupted by some other code that wants to read_trylock() this lock */
151 static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
156 local_irq_save(flags);
157 if (arch_spin_trylock(&rw->lock)) {
158 if (rw->counter == 0) {
162 /* Read-locked. Oh well. */
163 arch_spin_unlock(&rw->lock);
166 local_irq_restore(flags);
172 * read_can_lock - would read_trylock() succeed?
173 * @lock: the rwlock in question.
175 static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
177 return rw->counter >= 0;
181 * write_can_lock - would write_trylock() succeed?
182 * @lock: the rwlock in question.
184 static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
/* The _flags variants gain nothing on PA-RISC (irqs are managed
 * inside the lock routines themselves), so alias the plain forms. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
/* Relax hints used by generic contention loops while waiting. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
196 #endif /* __ASM_SPINLOCK_H */