//$$CDS-header$$

#ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
#define CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H

#include <cstdint>
#include <cds/compiler/gcc/x86/cxx11_atomic32.h>

//@cond
namespace cds { namespace cxx11_atomic {
    namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 {
#   ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT
        // primitives up to 32bit + fences
        using namespace cds::cxx11_atomic::platform::gcc::x86;
#   endif

        //-----------------------------------------------------------------------------
        // 64bit primitives
        //-----------------------------------------------------------------------------

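        // CMPXCHG keeps its comparand in RAX (constraint "+a"): on success it
        // stores 'desired' into *pDest, on failure it loads the current value
        // of *pDest into RAX. Either way 'prev' ends up holding the value
        // observed in memory and is written back to 'expected'.
        //
        // A minimal usage sketch (hypothetical caller, not part of this header):
        //      int64_t cell = 0;
        //      int64_t expected = 0;
        //      bool ok = cas64_strong( &cell, expected, int64_t(42),
        //          memory_order_acquire, memory_order_relaxed );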
        template <typename T>
        static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( cds::details::is_aligned( pDest, 8 ));

            T prev = expected;
            fence_before(mo_success);
            __asm__ __volatile__ (
                "lock ; cmpxchgq %[desired], %[pDest]"
                : [prev] "+a" (prev), [pDest] "+m" (*pDest)
                : [desired] "r" (desired)
                );
            bool success = (prev == expected);
            expected = prev;
            if (success)
                fence_after(mo_success);
            else
                fence_after(mo_fail);
            return success;
        }

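        // x86 CMPXCHG never fails spuriously, so the weak CAS simply
        // forwards to the strong one.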
        template <typename T>
        static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
        {
            return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
        }

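        // On amd64, naturally aligned 8-byte loads are atomic and a plain
        // load already gives acquire semantics; fence_after_load adds any
        // extra ordering that a seq_cst load may require.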
        template <typename T>
        static inline T load64( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( order == memory_order_relaxed
                || order == memory_order_consume
                || order == memory_order_acquire
                || order == memory_order_seq_cst
                );
            assert( pSrc );
            assert( cds::details::is_aligned( pSrc, 8 ));

            T v = *pSrc;
            fence_after_load( order );
            return v;
        }

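        // XCHG with a memory operand is implicitly locked, so no explicit
        // LOCK prefix is needed; the instruction acts as a full memory barrier.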
        template <typename T>
        static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( cds::details::is_aligned( pDest, 8 ));

            fence_before(order);
            __asm__ __volatile__ (
                "xchgq %[v], %[pDest]"
                : [v] "+r" (v), [pDest] "+m" (*pDest)
                );
            fence_after(order);
            return v;
        }

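        // A plain aligned store is enough for relaxed and release ordering on
        // amd64; a seq_cst store additionally needs a store-load barrier,
        // which the implicitly locked XCHG in exchange64 provides.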
        template <typename T>
        static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( order == memory_order_relaxed
                || order == memory_order_release
                || order == memory_order_seq_cst
                );
            assert( pDest );
            assert( cds::details::is_aligned( pDest, 8 ));

            if (order != memory_order_seq_cst) {
                fence_before(order);
                *pDest = val;
            }
            else {
                exchange64( pDest, val, order );
            }
        }

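        // LOCK XADD atomically adds the register operand to memory and
        // returns the previous memory value in that register.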
#       define CDS_ATOMIC_fetch64_add_defined
        template <typename T>
        static inline T fetch64_add( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( cds::details::is_aligned( pDest, 8 ));

            fence_before(order);
            __asm__ __volatile__ (
                "lock ; xaddq %[v], %[pDest]"
                : [v] "+r" (v), [pDest] "+m" (*pDest)
                );
            fence_after(order);
            return v;
        }

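        // Subtraction reuses LOCK XADD: negate the operand first, then
        // atomically add it, returning the previous value.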
#       define CDS_ATOMIC_fetch64_sub_defined
        template <typename T>
        static inline T fetch64_sub( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
        {
            static_assert( sizeof(T) == 8, "Illegal size of operand" );
            assert( cds::details::is_aligned( pDest, 8 ));

            fence_before(order);
            __asm__ __volatile__ (
                "negq %[v]\n\t"
                "lock ; xaddq %[v], %[pDest]"
                : [v] "+r" (v), [pDest] "+m" (*pDest)
                );
            fence_after(order);
            return v;
        }

        //-----------------------------------------------------------------------------
        // pointer primitives
        //-----------------------------------------------------------------------------

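        // The pointer primitives delegate to the 64-bit integer primitives
        // above, casting through uint64_t; the static_asserts guard against
        // unexpected pointer sizes.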
        template <typename T>
        static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );

            return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order );
        }

        template <typename T>
        static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
            assert( order == memory_order_relaxed
                || order == memory_order_release
                || order == memory_order_seq_cst
                );
            assert( pDest );

            if ( order != memory_order_seq_cst ) {
                fence_before( order );
                *pDest = src;
            }
            else {
                exchange_ptr( pDest, src, order );
            }
        }

        template <typename T>
        static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
            assert( order == memory_order_relaxed
                || order == memory_order_consume
                || order == memory_order_acquire
                || order == memory_order_seq_cst
                );
            assert( pSrc );

            T * v = *pSrc;
            fence_after_load( order );
            return v;
        }

        template <typename T>
        static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
        {
            static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );

            return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast<uint64_t *>( &expected ), (uint64_t) desired, mo_success, mo_fail );
        }

        template <typename T>
        static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
        {
            return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
        }

    }} // namespace gcc::amd64

#ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT
    using namespace gcc::amd64;
#endif
    }   // namespace platform

}}  // namespace cds::cxx11_atomic
//@endcond

#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H