switch rest over to model checker... might work now
diff --git a/include/impatomic.h b/include/impatomic.h
index ad169c276f7630263133d3a323efe44584b4e1a0..472e6638a603a70a09db0d06b28a9b40b3fb1001 100644
@@ -51,9 +51,6 @@ extern void __atomic_flag_wait__
 ( volatile atomic_flag* );
 extern void __atomic_flag_wait_explicit__
 ( volatile atomic_flag*, memory_order );
-extern volatile atomic_flag* __atomic_flag_for_address__
-( const volatile void* __z__ )
-__attribute__((const));
 
 #ifdef __cplusplus
 }
@@ -73,56 +70,65 @@ inline void atomic_flag::fence( memory_order __x__ ) const volatile
 #endif
 
 
-#define _ATOMIC_LOAD_( __a__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
-   volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
-   __atomic_flag_wait_explicit__( __g__, __x__ ); \
-   __typeof__((__a__)->__f__) __r__ = *__p__; \
-   atomic_flag_clear_explicit( __g__, __x__ ); \
-   __r__; })
-
-#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
-   __typeof__(__m__) __v__ = (__m__); \
-   volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
-   __atomic_flag_wait_explicit__( __g__, __x__ ); \
-   *__p__ = __v__; \
-   atomic_flag_clear_explicit( __g__, __x__ ); \
-   __v__; })
-
-#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
-   __typeof__(__m__) __v__ = (__m__); \
-   volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
-   __atomic_flag_wait_explicit__( __g__, __x__ ); \
-   __typeof__((__a__)->__f__) __r__ = *__p__; \
-   *__p__ __o__ __v__; \
-   atomic_flag_clear_explicit( __g__, __x__ ); \
-   __r__; })
-
-#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
-   __typeof__(__e__) __q__ = (__e__); \
-   __typeof__(__m__) __v__ = (__m__); \
-   bool __r__; \
-   volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
-   __atomic_flag_wait_explicit__( __g__, __x__ ); \
-   __typeof__((__a__)->__f__) __t__ = *__p__; \
-   if ( __t__ == *__q__ ) { *__p__ = __v__; __r__ = true; } \
-   else { *__q__ = __t__; __r__ = false; } \
-   atomic_flag_clear_explicit( __g__, __x__ ); \
-   __r__; })
-
-#define _ATOMIC_FENCE_( __a__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
-   volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
-   atomic_flag_fence( __g__, __x__ ); \
-   })
-
-
-#define ATOMIC_INTEGRAL_LOCK_FREE 0
-#define ATOMIC_ADDRESS_LOCK_FREE 0
-
+/*
+        The remainder of the example implementation uses the following
+        macros. These macros exploit GNU extensions for value-returning
+        blocks (AKA statement expressions) and __typeof__.
+
+        The macros rely on data fields of atomic structs being named __f__.
+        Other symbols used are __a__=atomic, __copy__=modified copy,
+        __e__=expected, __f__=field, __m__=modified, __o__=operation,
+        __old__=saved value, __p__=pointer to field, __q__=pointer to
+        expected, __r__=result, __t__=temporary, __v__=value (for single
+        evaluation), __x__=memory-ordering, and __y__=memory-ordering.
+*/
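+
+/*
+        Illustration only (not part of the original header): a GNU
+        statement expression evaluates to its last expression, and
+        __typeof__ names the type of its operand without evaluating it:
+
+            int x = 1;
+            __typeof__(x) y = ({ __typeof__(x) t = x; t + 1; });  // y == 2
+*/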
+
+#define _ATOMIC_LOAD_( __a__, __x__ ) \
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
+		model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); \
+		((__typeof__((__a__)->__f__)) (thread_current()->get_return_value())); \
+	})
+
+#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
+		__typeof__(__m__) __v__ = (__m__); \
+		model->switch_to_master(new ModelAction(ATOMIC_WRITE, __x__, __p__, __v__)); \
+		__v__; })
+
+#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
+		model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); \
+		__typeof__((__a__)->__f__) __old__ = (__typeof__((__a__)->__f__)) thread_current()->get_return_value(); \
+		__typeof__(__m__) __v__ = (__m__); \
+		__typeof__((__a__)->__f__) __copy__ = __old__; \
+		__copy__ __o__ __v__; \
+		model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, __copy__)); \
+		__old__; })
+
+#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
+		__typeof__(__e__) __q__ = (__e__); \
+		__typeof__(__m__) __v__ = (__m__); \
+		bool __r__; \
+		model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); \
+		__typeof__((__a__)->__f__) __t__ = (__typeof__((__a__)->__f__)) thread_current()->get_return_value(); \
+		if (__t__ == *__q__) { \
+			model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, __v__)); \
+			__r__ = true; \
+		} else { \
+			*__q__ = __t__; \
+			__r__ = false; \
+		} \
+		__r__; })
+
+/* Fences are not yet implemented; executing one is an error. */
+#define _ATOMIC_FENCE_( __a__, __x__ ) \
+	({ ASSERT(0); })
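+
+/*
+        Illustration only: with an atomic_int a, the expansion of
+
+            _ATOMIC_MODIFY_( &a, +=, 1, memory_order_seq_cst )
+
+        asks the model checker for the current value (ATOMIC_READ),
+        computes old + 1 locally, reports the new value back as an
+        ATOMIC_RMW action, and yields the old value, i.e. an atomic
+        fetch-and-add.
+*/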
+
+#define ATOMIC_CHAR_LOCK_FREE 1
+#define ATOMIC_CHAR16_T_LOCK_FREE 1
+#define ATOMIC_CHAR32_T_LOCK_FREE 1
+#define ATOMIC_WCHAR_T_LOCK_FREE 1
+#define ATOMIC_SHORT_LOCK_FREE 1
+#define ATOMIC_INT_LOCK_FREE 1
+#define ATOMIC_LONG_LOCK_FREE 1
+#define ATOMIC_LLONG_LOCK_FREE 1
+#define ATOMIC_ADDRESS_LOCK_FREE 1
 
 typedef struct atomic_bool
 {
@@ -2118,11 +2124,9 @@ inline void atomic_fence
 inline void* atomic_fetch_add_explicit
 ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 { void* volatile* __p__ = &((__a__)->__f__);
-  volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ );
-  __atomic_flag_wait_explicit__( __g__, __x__ );
-  void* __r__ = *__p__;
-  *__p__ = (void*)((char*)(*__p__) + __m__);
-  atomic_flag_clear_explicit( __g__, __x__ );
+       model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__));
+       void* __r__ = (void *) thread_current()->get_return_value();
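+	/* The char* cast gives byte-granularity pointer arithmetic;
+	   arithmetic on void* is a GNU extension. */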
+	model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, (void*)((char*)__r__ + __m__)));
   return __r__; }
 
 inline void* atomic_fetch_add
@@ -2133,11 +2137,9 @@ inline void* atomic_fetch_add
 inline void* atomic_fetch_sub_explicit
 ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 { void* volatile* __p__ = &((__a__)->__f__);
-  volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ );
-  __atomic_flag_wait_explicit__( __g__, __x__ );
-  void* __r__ = *__p__;
-  *__p__ = (void*)((char*)(*__p__) - __m__);
-  atomic_flag_clear_explicit( __g__, __x__ );
+       model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__));
+       void* __r__ = (void *) thread_current()->get_return_value();
+	model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, (void*)((char*)__r__ - __m__)));
   return __r__; }
 
 inline void* atomic_fetch_sub