/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>	/* atomic_ops_lock()/unlock() used by the !LLSC code below */

#define atomic_read(v)	((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))
#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay = 1, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"	bz	4f			\n"	/* scond succeeded */	\
	"	; --- scond fail delay ---	\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"4:	; --- success ---		\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	, [delay] "+&r" (delay), [tmp] "=&r" (tmp)				\
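/*
 * Illustrative C-level sketch of the retry policy encoded above; this is
 * explanation only, not code generated by this file, and the LLOCK()/SCOND()
 * helpers are made up for the example. On an SCOND failure the loop spins
 * for 'delay' iterations, doubles the delay (exponential backoff) and then
 * re-issues the LLOCK/SCOND pair, avoiding the livelock of STAR 9000923308:
 *
 *	unsigned int delay = 1, tmp;
 *
 *	for (;;) {
 *		val = LLOCK(&v->counter);		// 1:
 *		val = op(val, i);
 *		if (SCOND(&v->counter, val))		// Z flag set on success
 *			break;				// 4:
 *		for (tmp = delay; tmp != 0; tmp--)	// 2: busy-wait
 *			;
 *		delay *= 2;				// rol doubles the delay
 *	}
 */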
#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM							\
	"	bnz	1b			\n"	/* retry if scond failed */	\

#define SCOND_FAIL_RETRY_VARS

#endif	/* CONFIG_ARC_STAR_9000923308 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	SCOND_FAIL_RETRY_ASM						\
	: [val]	"=&r"	(val)	/* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),	/* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	SCOND_FAIL_RETRY_ASM						\
	: [val]	"=&r"	(val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
	return val;							\
}
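/*
 * For illustration only (this comment adds no API): the value-returning
 * variants generated above are bracketed by smp_mb() and therefore act as
 * full barriers, whereas the void ATOMIC_OP() variants are not. A caller
 * that relies on that ordering should use the _return form, e.g. with a
 * hypothetical counter:
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	if (atomic_add_return(1, &users) == 1)
 *		;	// first user; surrounding accesses are ordered
 */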
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else
static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * "in progress" on the same location.
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif	/* CONFIG_SMP */
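/*
 * Illustrative interleaving, for explanation only (the CPUs and the field
 * are hypothetical): without taking the lock, a plain store can be lost
 * against a lock-emulated read-modify-write running concurrently:
 *
 *	CPU0: emulated atomic_add()		CPU1: unlocked atomic_set()
 *	atomic_ops_lock(flags);
 *	tmp = v->counter;
 *						v->counter = i;
 *	tmp += 1;
 *	v->counter = tmp;	// overwrites CPU1's store
 *	atomic_ops_unlock(flags);
 *
 * Taking atomic_ops_lock() in atomic_set() serializes the store against the
 * emulated RMW, which is why the SMP variant above needs the lock.
 */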
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
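/*
 * For illustration (implied by the macros above, not spelled out here):
 * ATOMIC_OPS(add, +=, add) generates both
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int  atomic_add_return(int i, atomic_t *v);
 *
 * while ATOMIC_OP(and, &=, and) only generates atomic_and(), which is why
 * atomic_clear_mask() is expressed in terms of atomic_and() rather than a
 * dedicated sequence.
 */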
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
	smp_mb();							\
	c;								\
})
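/*
 * Illustrative usage (the object and refcnt field are made up for the
 * example): atomic_inc_not_zero() below is the usual consumer of this
 * cmpxchg loop, implementing "take a reference only if the object is still
 * live":
 *
 *	if (atomic_inc_not_zero(&obj->refcnt))
 *		;	// got a reference; a 0 count means it is being freed
 */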
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* _ASM_ARC_ATOMIC_H */