({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
__typeof__(__m__) __v__ = (__m__); \
model_write_action((void *) __p__, __x__, (uint64_t) __v__); \
- __v__; })
+ __v__ = __v__; /* Silence clang (-Wunused-value) */ \
+ })
/* Initialize the payload field (__f__) of atomic object __a__ to __m__,
 * reporting the store to the model checker via model_init_action so the
 * initializing write participates in the explored execution.
 * The statement expression evaluates to the initialization value; the final
 * self-assignment keeps that value while silencing -Wunused-value. */
#define _ATOMIC_INIT_( __a__, __m__ ) \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
	   __typeof__(__m__) __v__ = (__m__); \
	   model_init_action((void *) __p__, (uint64_t) __v__); \
	   __v__ = __v__; /* Silence clang (-Wunused-value) */ \
	})
/* Atomic read-modify-write on __a__'s payload: fetch the current value
 * through the model checker (model_rmwr_action), apply the compound
 * operator __o__ (e.g. +=) with operand __m__ to a local copy, then publish
 * the copy (model_rmw_action). Evaluates to the OLD value, matching C11
 * atomic_fetch_* semantics; the trailing self-assignment silences
 * -Wunused-value without changing that result.
 * NOTE(review): the __old__ and __v__ declarations were missing from the
 * mangled source and were reconstructed from the parallel pattern in
 * atomic_fetch_add_explicit below — confirm against the upstream file. */
#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
	   __typeof__((__a__)->__f__) __old__ = (__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
	   __typeof__(__m__) __v__ = (__m__); \
	   __typeof__((__a__)->__f__) __copy__ = __old__; \
	   __copy__ __o__ __v__; \
	   model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
	   __old__ = __old__; /* Silence clang (-Wunused-value) */ \
	})
/* No spurious failure for now */
/* NOTE(review): a real weak compare-exchange is permitted to fail spuriously;
 * this stub models it identically to the strong form. */
#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
/* Fetch-and-add on an atomic_address: returns the previous pointer value and
 * stores (old + __m__) bytes, routing the read through model_rmwr_action and
 * the write through model_rmw_action with memory order __x__.
 * The arithmetic goes via char* so __m__ is a byte offset. */
inline void* atomic_fetch_add_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
	volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
	__typeof__((__a__)->__f__) __old__ = (__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
	__typeof__((__a__)->__f__) __copy__ = __old__;
	__copy__ = (void *) (((char *)__copy__) + __m__);
	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
	return __old__;
}
-inline void* atomic_fetch_add
+ inline void* atomic_fetch_add
( volatile atomic_address* __a__, ptrdiff_t __m__ )
{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
/* Fetch-and-subtract on an atomic_address: returns the previous pointer
 * value and stores (old - __m__) bytes, routing the read through
 * model_rmwr_action and the write through model_rmw_action with memory
 * order __x__. The arithmetic goes via char* so __m__ is a byte offset. */
inline void* atomic_fetch_sub_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
	volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
	__typeof__((__a__)->__f__) __old__ = (__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
	__typeof__((__a__)->__f__) __copy__ = __old__;
	__copy__ = (void *) (((char *)__copy__) - __m__);
	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
	return __old__;
}
inline void* atomic_fetch_sub
( volatile atomic_address* __a__, ptrdiff_t __m__ )