3 #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H
4 #define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H
7 #include <cds/details/is_aligned.h>
10 namespace cds { namespace cxx11_atomic {
11 namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 {
13 static inline void fence_before( memory_order order ) CDS_NOEXCEPT
16 case memory_order_relaxed:
17 case memory_order_acquire:
18 case memory_order_consume:
20 case memory_order_release:
21 case memory_order_acq_rel:
22 CDS_COMPILER_RW_BARRIER;
24 case memory_order_seq_cst:
25 CDS_COMPILER_RW_BARRIER;
30 static inline void fence_after( memory_order order ) CDS_NOEXCEPT
33 case memory_order_acquire:
34 case memory_order_acq_rel:
35 CDS_COMPILER_RW_BARRIER;
37 case memory_order_relaxed:
38 case memory_order_consume:
39 case memory_order_release:
41 case memory_order_seq_cst:
42 CDS_COMPILER_RW_BARRIER;
48 static inline void fence_after_load(memory_order order) CDS_NOEXCEPT
51 case memory_order_relaxed:
52 case memory_order_release:
54 case memory_order_acquire:
55 case memory_order_acq_rel:
56 CDS_COMPILER_RW_BARRIER;
58 case memory_order_consume:
60 case memory_order_seq_cst:
61 __asm__ __volatile__ ( "mfence" ::: "memory" );
//-----------------------------------------------------------------------------
// fences
//-----------------------------------------------------------------------------
70 static inline void thread_fence(memory_order order) CDS_NOEXCEPT
74 case memory_order_relaxed:
75 case memory_order_consume:
77 case memory_order_release:
78 case memory_order_acquire:
79 case memory_order_acq_rel:
80 CDS_COMPILER_RW_BARRIER;
82 case memory_order_seq_cst:
83 __asm__ __volatile__ ( "mfence" ::: "memory" );
89 static inline void signal_fence(memory_order order) CDS_NOEXCEPT
91 // C++11: 29.8.8: only compiler optimization, no hardware instructions
94 case memory_order_relaxed:
96 case memory_order_consume:
97 case memory_order_release:
98 case memory_order_acquire:
99 case memory_order_acq_rel:
100 case memory_order_seq_cst:
101 CDS_COMPILER_RW_BARRIER;
//-----------------------------------------------------------------------------
// 8bit primitives
//-----------------------------------------------------------------------------
111 template <typename T>
112 static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
114 static_assert( sizeof(T) == 1, "Illegal size of operand" );
117 fence_before(mo_success);
118 __asm__ __volatile__ (
119 "lock ; cmpxchgb %[desired], %[pDest]"
120 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
121 : [desired] "q" (desired)
123 bool success = (prev == expected);
126 fence_after(mo_success);
128 fence_after(mo_fail);
132 template <typename T>
133 static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
135 return cas8_strong( pDest, expected, desired, mo_success, mo_fail );
138 template <typename T>
139 static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
141 static_assert( sizeof(T) == 1, "Illegal size of operand" );
144 __asm__ __volatile__ (
145 "xchgb %[v], %[pDest]"
146 : [v] "+q" (v), [pDest] "+m" (*pDest)
152 template <typename T>
153 static inline void store8( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT
155 static_assert( sizeof(T) == 1, "Illegal size of operand" );
156 assert( order == memory_order_relaxed
157 || order == memory_order_release
158 || order == memory_order_seq_cst
160 assert( pDest != NULL );
162 if ( order != memory_order_seq_cst ) {
163 fence_before( order );
167 exchange8( pDest, src, order );
171 template <typename T>
172 static inline T load8( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
174 static_assert( sizeof(T) == 1, "Illegal size of operand" );
175 assert( order == memory_order_relaxed
176 || order == memory_order_consume
177 || order == memory_order_acquire
178 || order == memory_order_seq_cst
180 assert( pSrc != NULL );
183 fence_after_load( order );
187 # define CDS_ATOMIC_fetch8_add_defined
188 template <typename T>
189 static inline T fetch8_add( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
192 __asm__ __volatile__ (
193 "lock ; xaddb %[val], %[pDest]"
194 : [val] "+q" (val), [pDest] "+m" (*pDest)
200 # define CDS_ATOMIC_fetch8_sub_defined
201 template <typename T>
202 static inline T fetch8_sub( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
205 __asm__ __volatile__ (
207 "lock ; xaddb %[val], %[pDest]"
208 : [val] "+q" (val), [pDest] "+m" (*pDest)
//-----------------------------------------------------------------------------
// atomic flag primitives
//-----------------------------------------------------------------------------
218 typedef bool atomic_flag_type;
219 static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT
221 return exchange8( pFlag, true, order );
224 static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT
226 store8( pFlag, false, order );
//-----------------------------------------------------------------------------
// 16bit primitives
//-----------------------------------------------------------------------------
233 template <typename T>
234 static inline T exchange16( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
236 static_assert( sizeof(T) == 2, "Illegal size of operand" );
237 assert( cds::details::is_aligned( pDest, 2 ));
240 __asm__ __volatile__ (
241 "xchgw %[v], %[pDest]"
242 : [v] "+q" (v), [pDest] "+m" (*pDest)
248 template <typename T>
249 static inline void store16( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT
251 static_assert( sizeof(T) == 2, "Illegal size of operand" );
252 assert( order == memory_order_relaxed
253 || order == memory_order_release
254 || order == memory_order_seq_cst
256 assert( pDest != NULL );
257 assert( cds::details::is_aligned( pDest, 2 ));
259 if ( order != memory_order_seq_cst ) {
260 fence_before( order );
264 exchange16( pDest, src, order );
268 template <typename T>
269 static inline T load16( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
271 static_assert( sizeof(T) == 2, "Illegal size of operand" );
272 assert( order == memory_order_relaxed
273 || order == memory_order_consume
274 || order == memory_order_acquire
275 || order == memory_order_seq_cst
277 assert( pSrc != NULL );
278 assert( cds::details::is_aligned( pSrc, 2 ));
281 fence_after_load( order );
285 template <typename T>
286 static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
288 static_assert( sizeof(T) == 2, "Illegal size of operand" );
289 assert( cds::details::is_aligned( pDest, 2 ));
292 fence_before(mo_success);
293 __asm__ __volatile__ (
294 "lock ; cmpxchgw %[desired], %[pDest]"
295 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
296 : [desired] "q" (desired)
298 bool success = prev == expected;
300 fence_after(mo_success);
302 fence_after(mo_fail);
309 template <typename T>
310 static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
312 return cas16_strong( pDest, expected, desired, mo_success, mo_fail );
315 # define CDS_ATOMIC_fetch16_add_defined
316 template <typename T>
317 static inline T fetch16_add( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
319 static_assert( sizeof(T) == 2, "Illegal size of operand" );
320 assert( cds::details::is_aligned( pDest, 2 ));
323 __asm__ __volatile__ (
324 "lock ; xaddw %[val], %[pDest]"
325 : [val] "+q" (val), [pDest] "+m" (*pDest)
331 # define CDS_ATOMIC_fetch16_sub_defined
332 template <typename T>
333 static inline T fetch16_sub( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
335 static_assert( sizeof(T) == 2, "Illegal size of operand" );
336 assert( cds::details::is_aligned( pDest, 2 ));
339 __asm__ __volatile__ (
341 "lock ; xaddw %[val], %[pDest]"
342 : [val] "+q" (val), [pDest] "+m" (*pDest)
//-----------------------------------------------------------------------------
// 32bit primitives
//-----------------------------------------------------------------------------
352 template <typename T>
353 static inline T exchange32( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
355 static_assert( sizeof(T) == 4, "Illegal size of operand" );
356 assert( cds::details::is_aligned( pDest, 4 ));
359 __asm__ __volatile__ (
360 "xchgl %[v], %[pDest]"
361 : [v] "+r" (v), [pDest] "+m" (*pDest)
367 template <typename T>
368 static inline void store32( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT
370 static_assert( sizeof(T) == 4, "Illegal size of operand" );
371 assert( order == memory_order_relaxed
372 || order == memory_order_release
373 || order == memory_order_seq_cst
375 assert( pDest != NULL );
376 assert( cds::details::is_aligned( pDest, 4 ));
378 if ( order != memory_order_seq_cst ) {
379 fence_before( order );
383 exchange32( pDest, src, order );
387 template <typename T>
388 static inline T load32( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
390 static_assert( sizeof(T) == 4, "Illegal size of operand" );
391 assert( order == memory_order_relaxed
392 || order == memory_order_consume
393 || order == memory_order_acquire
394 || order == memory_order_seq_cst
396 assert( pSrc != NULL );
397 assert( cds::details::is_aligned( pSrc, 4 ));
400 fence_after_load( order );
404 template <typename T>
405 static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
407 static_assert( sizeof(T) == 4, "Illegal size of operand" );
408 assert( cds::details::is_aligned( pDest, 4 ));
411 fence_before(mo_success);
412 __asm__ __volatile__ (
413 "lock ; cmpxchgl %[desired], %[pDest]"
414 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
415 : [desired] "r" (desired)
417 bool success = prev == expected;
419 fence_after(mo_success);
421 fence_after(mo_fail);
427 template <typename T>
428 static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
430 return cas32_strong( pDest, expected, desired, mo_success, mo_fail );
// fetch_xxx may be emulated via cas32
// If the platform has special fetch_xxx instruction
// then it should define CDS_ATOMIC_fetch32_xxx_defined macro
437 # define CDS_ATOMIC_fetch32_add_defined
438 template <typename T>
439 static inline T fetch32_add( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
441 static_assert( sizeof(T) == 4, "Illegal size of operand" );
442 assert( cds::details::is_aligned( pDest, 4 ));
445 __asm__ __volatile__ (
446 "lock ; xaddl %[v], %[pDest]"
447 : [v] "+r" (v), [pDest] "+m" (*pDest)
453 # define CDS_ATOMIC_fetch32_sub_defined
454 template <typename T>
455 static inline T fetch32_sub( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
457 static_assert( sizeof(T) == 4, "Illegal size of operand" );
458 assert( cds::details::is_aligned( pDest, 4 ));
461 __asm__ __volatile__ (
463 "lock ; xaddl %[v], %[pDest]"
464 : [v] "+r" (v), [pDest] "+m" (*pDest)
470 }}} // namespace platform::gcc::x86
471 }} // namespace cds::cxx11_atomic
474 #endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H