+
+// cds atomic fetch sub
+uint8_t cds_atomic_fetch_sub8(void* addr, int atomic_index, uint8_t val) {
+ _ATOMIC_RMW_( -= , 8, addr, atomic_index, val);
+}
+uint16_t cds_atomic_fetch_sub16(void* addr, int atomic_index, uint16_t val) {
+ _ATOMIC_RMW_( -= , 16, addr, atomic_index, val);
+}
+uint32_t cds_atomic_fetch_sub32(void* addr, int atomic_index, uint32_t val) {
+ _ATOMIC_RMW_( -= , 32, addr, atomic_index, val);
+}
+uint64_t cds_atomic_fetch_sub64(void* addr, int atomic_index, uint64_t val) {
+ _ATOMIC_RMW_( -= , 64, addr, atomic_index, val);
+}
+
+// cds atomic fetch and
+uint8_t cds_atomic_fetch_and8(void* addr, int atomic_index, uint8_t val) {
+ _ATOMIC_RMW_( &= , 8, addr, atomic_index, val);
+}
+uint16_t cds_atomic_fetch_and16(void* addr, int atomic_index, uint16_t val) {
+ _ATOMIC_RMW_( &= , 16, addr, atomic_index, val);
+}
+uint32_t cds_atomic_fetch_and32(void* addr, int atomic_index, uint32_t val) {
+ _ATOMIC_RMW_( &= , 32, addr, atomic_index, val);
+}
+uint64_t cds_atomic_fetch_and64(void* addr, int atomic_index, uint64_t val) {
+ _ATOMIC_RMW_( &= , 64, addr, atomic_index, val);
+}
+
+// cds atomic fetch or
+uint8_t cds_atomic_fetch_or8(void* addr, int atomic_index, uint8_t val) {
+ _ATOMIC_RMW_( |= , 8, addr, atomic_index, val);
+}
+uint16_t cds_atomic_fetch_or16(void* addr, int atomic_index, uint16_t val) {
+ _ATOMIC_RMW_( |= , 16, addr, atomic_index, val);
+}
+uint32_t cds_atomic_fetch_or32(void* addr, int atomic_index, uint32_t val) {
+ _ATOMIC_RMW_( |= , 32, addr, atomic_index, val);
+}
+uint64_t cds_atomic_fetch_or64(void* addr, int atomic_index, uint64_t val) {
+ _ATOMIC_RMW_( |= , 64, addr, atomic_index, val);
+}
+
+// cds atomic fetch xor
+uint8_t cds_atomic_fetch_xor8(void* addr, int atomic_index, uint8_t val) {
+ _ATOMIC_RMW_( ^= , 8, addr, atomic_index, val);
+}
+uint16_t cds_atomic_fetch_xor16(void* addr, int atomic_index, uint16_t val) {
+ _ATOMIC_RMW_( ^= , 16, addr, atomic_index, val);
+}
+uint32_t cds_atomic_fetch_xor32(void* addr, int atomic_index, uint32_t val) {
+ _ATOMIC_RMW_( ^= , 32, addr, atomic_index, val);
+}
+uint64_t cds_atomic_fetch_xor64(void* addr, int atomic_index, uint64_t val) {
+ _ATOMIC_RMW_( ^= , 64, addr, atomic_index, val);
+}
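+
+/*
+ * For reference, a minimal sketch of what _ATOMIC_RMW_ presumably expands to,
+ * assuming the same model_*_action_helper interface used by _ATOMIC_CMPSWP_
+ * below. The real definition lives elsewhere in this file; this only
+ * illustrates the read-modify-write pattern (read old value, apply op, write
+ * back, return the old value) and is not the actual macro:
+ *
+ * #define _ATOMIC_RMW_(op, size, addr, atomic_index, val)                   \
+ * ({                                                                        \
+ *	uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);  \
+ *	uint##size##_t _copy = _old;                                         \
+ *	_copy op ((uint##size##_t) val);                                     \
+ *	model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);       \
+ *	return _old;                                                         \
+ * })
+ */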
+
+// cds atomic compare and exchange
+// To accommodate the LLVM pass, these functions return the value read from
+// addr rather than a boolean success flag.
+
+#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
+#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index)         \
+({                                                                            \
+	uint##size##_t _desired = desired;                                    \
+	uint##size##_t _expected = expected;                                  \
+	uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);   \
+	if (_old == _expected) {                                              \
+		model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired); \
+		return _expected;                                             \
+	} else {                                                              \
+		model_rmwc_action_helper(addr, atomic_index);                 \
+		_expected = _old;                                             \
+		return _old;                                                  \
+	}                                                                     \
+})
+
+// In the C11 API, expected is a pointer to the expected value, but the
+// CmpOperand extracted from the LLVM IR is an integer type, so it is passed
+// by value here.
+
+uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
+ uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
+}
+uint16_t cds_atomic_compare_exchange16(void* addr, uint16_t expected,
+ uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
+}
+uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
+ uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
+}
+uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
+ uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
+}
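+
+/*
+ * A minimal usage sketch (illustrative only): because the functions above
+ * return the value read from addr rather than a bool, the instrumented caller
+ * recovers the C11 success flag by comparing the return value against the
+ * expected value it passed in. The variable x and the memory-order indices
+ * index_succ / index_fail below are hypothetical placeholders.
+ *
+ * uint32_t expected = 0;
+ * uint32_t old = cds_atomic_compare_exchange32(&x, expected, 1,
+ *                                              index_succ, index_fail);
+ * bool success = (old == expected);   // compare_exchange_strong semantics
+ */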
+
+// cds atomic thread fence
+
+void cds_atomic_thread_fence(int atomic_index) {
+ model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
+}
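+
+/*
+ * The atomic_index argument selects a memory_order from the orders[] table,
+ * with the index presumably assigned by the LLVM pass. A hypothetical mapping
+ * for illustration only (the actual table is defined elsewhere and may
+ * differ):
+ *
+ * static memory_order orders[6] = {
+ *	memory_order_relaxed, memory_order_consume, memory_order_acquire,
+ *	memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+ * };
+ */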
+
+/*
+#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__e__) __q__ = (__e__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ bool __r__; \
+ __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ if (__t__ == * __q__ ) { \
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
+ else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
+ __r__; })
+
+#define _ATOMIC_FENCE_( __x__ ) \
+ ({ model_fence_action(__x__);})
+*/
+
+/*
+
+#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ __typeof__((__a__)->__f__) __copy__= __old__; \
+ __copy__ __o__ __v__; \
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
+ __old__ = __old__; Silence clang (-Wunused-value) \
+ })
+*/