arch/arm/include/asm/spinlock.h (firefly-linux-kernel-4.4.55.git)
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

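/*
 * Illustrative sketch only (not kernel code): each ALT_SMP() use above
 * records the address of its SMP-only instruction plus a UP replacement
 * in the .alt.smp.init section.  When an SMP_ON_UP kernel boots on a
 * uniprocessor, early init walks that table and overwrites each SEV/WFE
 * with the replacement (a NOP).  The struct and loop below only sketch
 * that idea in C; the real fixup runs in assembly at early boot (see
 * __do_fixup_smp_on_up in arch/arm/kernel/head.S) on raw address/
 * instruction-word pairs.
 */
#if 0
struct alt_smp_fixup {
	u32 *insn;	/* address of the "9998:" SMP instruction	*/
	u32 up_insn;	/* encoding of the UP replacement (e.g. NOP)	*/
};

static void __init fixup_smp_on_up(struct alt_smp_fixup *start,
				   struct alt_smp_fixup *end)
{
	struct alt_smp_fixup *f;

	for (f = start; f < end; f++)
		*f->insn = f->up_insn;	/* patch SEV/WFE away on UP */
}
#endif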
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	/* the MCR below is the pre-ARMv7 CP15 encoding of DSB */
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

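/*
 * Purely illustrative sketch (not kernel code): the same ticket-lock
 * algorithm written with C11 atomics.  "next" is the ticket handed to
 * the next arriving CPU, "owner" is the ticket currently being served;
 * the lock is free when the two are equal.  The type and field names
 * below are assumptions for illustration; the real layout lives in
 * asm/spinlock_types.h, where the two 16-bit halves share one 32-bit
 * slock word.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t next;		/* next ticket to hand out	*/
	_Atomic uint16_t owner;		/* ticket currently served	*/
};

static void ticket_lock(struct ticket_lock *l)
{
	/* take a ticket: the ldrex/add/strex loop in arch_spin_lock() */
	uint16_t me = atomic_fetch_add(&l->next, 1);

	/* spin until our ticket comes up: the wfe() loop below */
	while (atomic_load(&l->owner) != me)
		;
}

static void ticket_unlock(struct ticket_lock *l)
{
	/* serve the next ticket: what arch_spin_unlock() does below */
	atomic_store(&l->owner, atomic_load(&l->owner) + 1);
}
#endif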
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

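/*
 * Illustrative note (assuming the spinlock_types.h layout where "owner"
 * and "next" are the two 16-bit halves of the 32-bit slock word):
 * rotating slock right by 16 swaps the halves, so slock minus that
 * rotation is zero exactly when owner == next, i.e. the lock is free.
 * That is what "subs %1, %0, %0, ror #16" tests above; the hypothetical
 * helper below spells out the same check in plain C.
 */
#if 0
static inline int ticket_lock_is_free(u32 slock)
{
	u32 swapped = (slock >> 16) | (slock << 16);	/* ror #16 */

	return (slock - swapped) == 0;			/* owner == next */
}
#endif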
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

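/*
 * Sketch of the lock-word encoding the code below relies on (see
 * arch_rwlock_t in asm/spinlock_types.h); the macro names here are
 * illustrative only:
 *
 *   lock == 0             : free
 *   lock == 0x80000000    : held by one writer (bit 31 set)
 *   0 < lock < 0x80000000 : held by that many readers
 */
#if 0
#define RW_LOCK_FREE		0x00000000u
#define RW_LOCK_WRITER		0x80000000u	/* what arch_write_lock() stores */
#define RW_LOCK_READERS(l)	((l) & 0x7fffffffu)
#endif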
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

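/*
 * Illustrative-only C rendering of the write-side fast paths above,
 * using C11 atomics (assumed names, not kernel interfaces): a writer
 * may only claim the lock when the whole word is zero, and releases it
 * by writing zero back.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static int rwlock_write_trylock(_Atomic uint32_t *lock)
{
	uint32_t expected = 0;

	/* 0 -> 0x80000000: succeeds only when no readers and no writer */
	return atomic_compare_exchange_strong(lock, &expected, 0x80000000u);
}

static void rwlock_write_unlock(_Atomic uint32_t *lock)
{
	/* exclusively held, so a plain store of zero is enough */
	atomic_store(lock, 0);
}
#endif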
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

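/*
 * Purely illustrative C rendering of the read-lock loop above
 * (assumptions: 32-bit lock word, bit 31 set means "held for write",
 * so a negative signed value means a writer is present):
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static void rwlock_read_lock(_Atomic int32_t *lock)
{
	int32_t old;

	for (;;) {
		old = atomic_load(lock);
		if (old < 0)	/* writer present: the asm waits in WFE("mi") */
			continue;
		/* bump the reader count if nobody raced us */
		if (atomic_compare_exchange_weak(lock, &old, old + 1))
			break;
	}
}
#endif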
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

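/*
 * Illustrative note: only the reader that drops the count to zero
 * issues dsb_sev() above, since a waiting writer can make progress
 * only once every reader is gone.  A hypothetical C equivalent
 * (wake_writer() is a stand-in for dsb_sev(), not a real interface):
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static void rwlock_read_unlock(_Atomic int32_t *lock)
{
	/* fetch_sub returns the old value; old == 1 means we were last */
	if (atomic_fetch_sub(lock, 1) == 1)
		wake_writer();
}
#endif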
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */