9 #include "snapshot-interface.h"
10 #include "threads-model.h"
13 memory_order orders[8] = {
14 memory_order_relaxed, memory_order_consume, memory_order_acquire,
15 memory_order_release, memory_order_acq_rel, memory_order_seq_cst,
19 static void ensureModel() {
21 snapshot_system_init(10000, 1024, 1024, 40000);
22 model = new ModelChecker();
23 model->startChecker();
27 /** Performs a read action.*/
28 uint64_t model_read_action(void * obj, memory_order ord) {
29 return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
32 /** Performs a write action.*/
33 void model_write_action(void * obj, memory_order ord, uint64_t val) {
34 model->switch_to_master(new ModelAction(ATOMIC_WRITE, ord, obj, val));
37 /** Performs an init action. */
38 void model_init_action(void * obj, uint64_t val) {
39 model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, val));
43 * Performs the read part of a RMW action. The next action must either be the
44 * write part of the RMW action or an explicit close out of the RMW action w/o
47 uint64_t model_rmwr_action(void *obj, memory_order ord) {
48 return model->switch_to_master(new ModelAction(ATOMIC_RMWR, ord, obj));
52 * Performs the read part of a RMW CAS action. The next action must
53 * either be the write part of the RMW action or an explicit close out
54 * of the RMW action w/o a write.
56 uint64_t model_rmwrcas_action(void *obj, memory_order ord, uint64_t oldval, int size) {
57 return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, ord, obj, oldval, size));
61 /** Performs the write part of a RMW action. */
62 void model_rmw_action(void *obj, memory_order ord, uint64_t val) {
63 model->switch_to_master(new ModelAction(ATOMIC_RMW, ord, obj, val));
66 /** Closes out a RMW action without doing a write. */
67 void model_rmwc_action(void *obj, memory_order ord) {
68 model->switch_to_master(new ModelAction(ATOMIC_RMWC, ord, obj));
71 /** Issues a fence operation. */
72 void model_fence_action(memory_order ord) {
73 model->switch_to_master(new ModelAction(ATOMIC_FENCE, ord, FENCE_LOCATION));
76 /* --- helper functions --- */
77 uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oldval, int size, const char *position) {
79 return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, position, orders[atomic_index], obj, oldval, size));
82 uint64_t model_rmwr_action_helper(void *obj, int atomic_index, const char *position) {
84 return model->switch_to_master(new ModelAction(ATOMIC_RMWR, position, orders[atomic_index], obj));
87 void model_rmw_action_helper(void *obj, uint64_t val, int atomic_index, const char * position) {
89 model->switch_to_master(new ModelAction(ATOMIC_RMW, position, orders[atomic_index], obj, val));
92 void model_rmwc_action_helper(void *obj, int atomic_index, const char *position) {
94 model->switch_to_master(new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj));
98 uint8_t cds_volatile_load8(void * obj, int atomic_index, const char * position) {
100 return (uint8_t) model->switch_to_master(
101 new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
103 uint16_t cds_volatile_load16(void * obj, int atomic_index, const char * position) {
105 return (uint16_t) model->switch_to_master(
106 new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
108 uint32_t cds_volatile_load32(void * obj, int atomic_index, const char * position) {
110 return (uint32_t) model->switch_to_master(
111 new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj)
114 uint64_t cds_volatile_load64(void * obj, int atomic_index, const char * position) {
116 return model->switch_to_master(
117 new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
120 // cds volatile stores
121 void cds_volatile_store8(void * obj, uint8_t val, int atomic_index, const char * position) {
123 model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
125 void cds_volatile_store16(void * obj, uint16_t val, int atomic_index, const char * position) {
127 model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
129 void cds_volatile_store32(void * obj, uint32_t val, int atomic_index, const char * position) {
131 model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
133 void cds_volatile_store64(void * obj, uint64_t val, int atomic_index, const char * position) {
135 model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
/* cds atomic init: records the initializing write in the model, updates the
 * backing memory, and tags each written byte with the writing thread for race
 * detection. */
#define CDSATOMICINT(size)                                              \
	void cds_atomic_init ## size (void * obj, uint ## size ## _t val, const char * position) { \
		ensureModel();                                                      \
		model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)); \
		*((uint ## size ## _t *)obj) = val;                                 \
		thread_id_t tid = thread_current()->get_id();                       \
		for(int i = 0;i < size / 8;i++) {                                   \
			recordWrite(tid, (void *)(((char *)obj)+i));                      \
		}                                                                   \
	}
/* cds atomic load: records the read and returns the model-chosen value,
 * truncated to the operand width. */
#define CDSATOMICLOAD(size)                                             \
	uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
		ensureModel();                                                      \
		return (uint ## size ## _t)model->switch_to_master(                 \
			new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
	}
/* cds atomic store: records the write, updates the backing memory, and tags
 * each written byte with the writing thread for race detection. */
#define CDSATOMICSTORE(size)                                            \
	void cds_atomic_store ## size(void * obj, uint ## size ## _t val, int atomic_index, const char * position) { \
		ensureModel();                                                      \
		model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)); \
		*((uint ## size ## _t *)obj) = val;                                 \
		thread_id_t tid = thread_current()->get_id();                       \
		for(int i = 0;i < size / 8;i++) {                                   \
			recordWrite(tid, (void *)(((char *)obj)+i));                      \
		}                                                                   \
	}
/* Generic read-modify-write: read the old value, apply __op__ to a copy,
 * record and perform the write, tag the written bytes, and return (via a
 * `return` inside the GNU statement expression, which exits the enclosing
 * cds_atomic_* function) the value read before modification. */
#define _ATOMIC_RMW_(__op__, size, addr, val, atomic_index, position)   \
	({                                                                    \
		uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index, position); \
		uint ## size ## _t _copy = _old;                                    \
		uint ## size ## _t _val = val;                                      \
		_copy __op__ _val;                                                  \
		model_rmw_action_helper(addr, (uint64_t) _copy, atomic_index, position); \
		*((uint ## size ## _t *)addr) = _copy;                              \
		thread_id_t tid = thread_current()->get_id();                       \
		for(int i = 0;i < size / 8;i++) {                                   \
			recordWrite(tid, (void *)(((char *)addr)+i));                     \
		}                                                                   \
		return _old;                                                        \
	})
201 // cds atomic exchange
202 #define CDSATOMICEXCHANGE(size) \
203 uint ## size ## _t cds_atomic_exchange ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
204 _ATOMIC_RMW_( =, size, addr, val, atomic_index, position); \
208 CDSATOMICEXCHANGE(16)
209 CDSATOMICEXCHANGE(32)
210 CDSATOMICEXCHANGE(64)
// cds atomic fetch add: returns the pre-addition value.
#define CDSATOMICADD(size)                                              \
	uint ## size ## _t cds_atomic_fetch_add ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
		_ATOMIC_RMW_( +=, size, addr, val, atomic_index, position);         \
	}
// cds atomic fetch sub: returns the pre-subtraction value.
#define CDSATOMICSUB(size)                                              \
	uint ## size ## _t cds_atomic_fetch_sub ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
		_ATOMIC_RMW_( -=, size, addr, val, atomic_index, position);         \
	}
// cds atomic fetch and: returns the pre-AND value.
#define CDSATOMICAND(size)                                              \
	uint ## size ## _t cds_atomic_fetch_and ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
		_ATOMIC_RMW_( &=, size, addr, val, atomic_index, position);         \
	}
// cds atomic fetch or: returns the pre-OR value.
#define CDSATOMICOR(size)                                               \
	uint ## size ## _t cds_atomic_fetch_or ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
		_ATOMIC_RMW_( |=, size, addr, val, atomic_index, position);         \
	}
// cds atomic fetch xor: returns the pre-XOR value.
#define CDSATOMICXOR(size)                                              \
	uint ## size ## _t cds_atomic_fetch_xor ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
		_ATOMIC_RMW_( ^=, size, addr, val, atomic_index, position);         \
	}
// cds atomic compare and exchange
// In order to accommodate the LLVM PASS, the return values are not true or false.
// On success the expected value is returned; on failure the observed old value
// is returned (and written back through `expected` in the _v2 wrapper).

#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index, position) \
	({                                                                    \
		uint ## size ## _t _desired = desired;                              \
		uint ## size ## _t _expected = expected;                            \
		uint ## size ## _t _old = model_rmwrcas_action_helper(addr, atomic_index, _expected, sizeof(_expected), position); \
		if (_old == _expected) {                                            \
			model_rmw_action_helper(addr, (uint64_t) _desired, atomic_index, position); \
			*((uint ## size ## _t *)addr) = desired;                          \
			thread_id_t tid = thread_current()->get_id();                     \
			for(int i = 0;i < size / 8;i++) {                                 \
				recordWrite(tid, (void *)(((char *)addr)+i));                   \
			}                                                                 \
			return _expected; }                                               \
		else {                                                              \
			model_rmwc_action_helper(addr, atomic_index, position); _expected = _old; return _old; } \
	})
// atomic_compare_exchange version 1: the CmpOperand (corresponds to expected)
// extracted from LLVM IR is an integer type.
#define CDSATOMICCASV1(size)                                            \
	uint ## size ## _t cds_atomic_compare_exchange ## size ## _v1(void* addr, uint ## size ## _t expected, uint ## size ## _t desired, int atomic_index_succ, int atomic_index_fail, const char *position) { \
		_ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index_succ, position); \
	}
// atomic_compare_exchange version 2: the CmpOperand is a pointer to the
// expected value; returns a bool success flag like the C11 API.
#define CDSATOMICCASV2(size)                                            \
	bool cds_atomic_compare_exchange ## size ## _v2(void* addr, uint ## size ## _t* expected, uint ## size ## _t desired, int atomic_index_succ, int atomic_index_fail, const char *position) { \
		uint ## size ## _t ret = cds_atomic_compare_exchange ## size ## _v1(addr, *expected, desired, atomic_index_succ, atomic_index_fail, position); \
		return ret == *expected;                                            \
	}
312 // cds atomic thread fence
314 void cds_atomic_thread_fence(int atomic_index, const char * position) {
315 model->switch_to_master(
316 new ModelAction(ATOMIC_FENCE, position, orders[atomic_index], FENCE_LOCATION)
/* Legacy GCC-intrinsic-style compare-and-swap over an atomic struct with
 * field __f__; yields a bool success flag.
 * NOTE(review): this redefines _ATOMIC_CMPSWP_ with a different arity than the
 * size-based macro above — presumably the two live in mutually exclusive
 * preprocessor sections of the full file; confirm the surrounding guards. */
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);         \
		__typeof__(__e__) __q__ = (__e__);                                        \
		__typeof__(__m__) __v__ = (__m__);                                        \
		bool __r__;                                                               \
		__typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
		if (__t__ == * __q__ ) {                                                  \
			model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
		else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
		__r__; })
/* Legacy fence wrapper: forwards the ordering to model_fence_action. */
#define _ATOMIC_FENCE_( __x__ )                                         \
	({ model_fence_action(__x__);})
/* Legacy read-modify-write over an atomic struct with field __f__: reads the
 * old value, applies __o__ with operand __m__, records the write, and yields
 * the pre-modification value. */
#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);         \
		__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
		__typeof__(__m__) __v__ = (__m__);                                        \
		__typeof__((__a__)->__f__) __copy__= __old__;                             \
		__copy__ __o__ __v__;                                                     \
		model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);              \
		__old__ = __old__;  /* Silence clang (-Wunused-value) */                  \
		__old__; })
349 void cds_func_entry(const char * funcName) {
352 Thread * th = thread_current();
355 ModelHistory *history = model->get_history();
356 if ( !history->getFuncMap()->contains(funcName) ) {
357 // add func id to func map
358 func_id = history->get_func_counter();
359 history->incr_func_counter();
360 history->getFuncMap()->put(funcName, func_id);
362 // add func id to reverse func map
363 ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
364 if ( func_map_rev->size() <= func_id )
365 func_map_rev->resize( func_id + 1 );
366 func_map_rev->at(func_id) = funcName;
368 func_id = history->getFuncMap()->get(funcName);
371 history->enter_function(func_id, th->get_id());
374 void cds_func_exit(const char * funcName) {
377 Thread * th = thread_current();
380 ModelHistory *history = model->get_history();
381 func_id = history->getFuncMap()->get(funcName);
383 /* func_id not found; this could happen in the case where a function calls cds_func_entry
384 * when the model has been defined yet, but then an atomic inside the function initializes
385 * the model. And then cds_func_exit is called upon the function exiting.
390 history->exit_function(func_id, th->get_id());