remove old release sequences
[c11tester.git] / cmodelint.cc
#include <stdio.h>
#include "model.h"
#include "action.h"
#include "cmodelint.h"
#include "threads-model.h"

memory_order orders[6] = {
	memory_order_relaxed, memory_order_consume, memory_order_acquire,
	memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};
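
/*
 * Usage sketch (an assumption about the instrumented callers, not something
 * this file defines): the LLVM pass encodes each C11 memory order as an
 * integer index into this table, so index 0 selects memory_order_relaxed and
 * index 5 selects memory_order_seq_cst.  A seq_cst 32-bit load of a shared
 * variable x would then be modeled roughly as
 *
 *   uint32_t v = cds_atomic_load32(&x, 5);   // orders[5] == memory_order_seq_cst
 */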

/** Performs a read action. */
uint64_t model_read_action(void * obj, memory_order ord) {
	return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
}

/** Performs a write action. */
void model_write_action(void * obj, memory_order ord, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, ord, obj, val));
}

/** Performs an init action. */
void model_init_action(void * obj, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, val));
}

/**
 * Performs the read part of an RMW action. The next action must be either the
 * write part of the RMW action or an explicit close-out of the RMW action
 * without a write.
 */
uint64_t model_rmwr_action(void *obj, memory_order ord) {
	return model->switch_to_master(new ModelAction(ATOMIC_RMWR, ord, obj));
}

/** Performs the write part of an RMW action. */
void model_rmw_action(void *obj, memory_order ord, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_RMW, ord, obj, val));
}

/** Closes out an RMW action without doing a write. */
void model_rmwc_action(void *obj, memory_order ord) {
	model->switch_to_master(new ModelAction(ATOMIC_RMWC, ord, obj));
}
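
/*
 * Minimal sketch of the RMW protocol described above (illustrative only;
 * `obj' and `should_store' are placeholders): a completed read-modify-write
 * is an ATOMIC_RMWR followed by an ATOMIC_RMW on the same location, while an
 * RMW that decides not to write closes out with ATOMIC_RMWC instead:
 *
 *   uint64_t old = model_rmwr_action(obj, memory_order_acq_rel);   // read part
 *   if (should_store)
 *     model_rmw_action(obj, memory_order_acq_rel, old + 1);        // write part
 *   else
 *     model_rmwc_action(obj, memory_order_acq_rel);                // close out without a write
 */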

/** Issues a fence operation. */
void model_fence_action(memory_order ord) {
	model->switch_to_master(new ModelAction(ATOMIC_FENCE, ord, FENCE_LOCATION));
}

// --------------------- helper functions --------------------------------
uint64_t model_rmwr_action_helper(void *obj, int atomic_index) {
	return model->switch_to_master(new ModelAction(ATOMIC_RMWR, orders[atomic_index], obj));
}

void model_rmw_action_helper(void *obj, int atomic_index, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_RMW, orders[atomic_index], obj, val));
}

void model_rmwc_action_helper(void *obj, int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_RMWC, orders[atomic_index], obj));
}

void model_fence_action_helper(int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
}

// cds atomic loads
uint8_t cds_atomic_load8(void * obj, int atomic_index) {
	return (uint8_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint16_t cds_atomic_load16(void * obj, int atomic_index) {
	return (uint16_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint32_t cds_atomic_load32(void * obj, int atomic_index) {
	return (uint32_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint64_t cds_atomic_load64(void * obj, int atomic_index) {
	return model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj));
}

// cds atomic stores
void cds_atomic_store8(void * obj, int atomic_index, uint8_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store16(void * obj, int atomic_index, uint16_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store32(void * obj, int atomic_index, uint32_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store64(void * obj, int atomic_index, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
}

/*
#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val )            \
({                                                                      \
  uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);   \
  uint##size##_t _copy = _old;                                          \
  _copy __op__ ( uint##size##_t ) _val;                                 \
  model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);        \
  return _old;                                                          \
})*/

#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val )            \
({                                                                      \
  uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);   \
  uint##size##_t _copy = _old;                                          \
  uint##size##_t _val = val;                                            \
  _copy __op__ _val;                                                    \
  model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);        \
  return _old;                                                          \
})
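
/*
 * Worked expansion (for illustration): with __op__ `+=' and size 32,
 * cds_atomic_fetch_add32(addr, atomic_index, val) below behaves like
 *
 *   uint32_t _old = model_rmwr_action_helper(addr, atomic_index);  // read part of the RMW
 *   uint32_t _copy = _old;
 *   uint32_t _val = val;
 *   _copy += _val;                                                 // apply the operation locally
 *   model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); // write part of the RMW
 *   return _old;                                                   // fetch_* returns the old value
 */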

// cds atomic exchange
uint8_t cds_atomic_exchange8(void* addr, int atomic_index, uint8_t val) {
	_ATOMIC_RMW_( = , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_exchange16(void* addr, int atomic_index, uint16_t val) {
	_ATOMIC_RMW_( = , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_exchange32(void* addr, int atomic_index, uint32_t val) {
	_ATOMIC_RMW_( = , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_exchange64(void* addr, int atomic_index, uint64_t val) {
	_ATOMIC_RMW_( = , 64, addr, atomic_index, val);
}

// cds atomic fetch add
uint8_t cds_atomic_fetch_add8(void* addr, int atomic_index, uint8_t val) {
	_ATOMIC_RMW_( += , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_add16(void* addr, int atomic_index, uint16_t val) {
	_ATOMIC_RMW_( += , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_add32(void* addr, int atomic_index, uint32_t val) {
	_ATOMIC_RMW_( += , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_add64(void* addr, int atomic_index, uint64_t val) {
	_ATOMIC_RMW_( += , 64, addr, atomic_index, val);
}

// cds atomic fetch sub
uint8_t cds_atomic_fetch_sub8(void* addr, int atomic_index, uint8_t val) {
	_ATOMIC_RMW_( -= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_sub16(void* addr, int atomic_index, uint16_t val) {
	_ATOMIC_RMW_( -= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_sub32(void* addr, int atomic_index, uint32_t val) {
	_ATOMIC_RMW_( -= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_sub64(void* addr, int atomic_index, uint64_t val) {
	_ATOMIC_RMW_( -= , 64, addr, atomic_index, val);
}

// cds atomic fetch and
uint8_t cds_atomic_fetch_and8(void* addr, int atomic_index, uint8_t val) {
	_ATOMIC_RMW_( &= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_and16(void* addr, int atomic_index, uint16_t val) {
	_ATOMIC_RMW_( &= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_and32(void* addr, int atomic_index, uint32_t val) {
	_ATOMIC_RMW_( &= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_and64(void* addr, int atomic_index, uint64_t val) {
	_ATOMIC_RMW_( &= , 64, addr, atomic_index, val);
}

// cds atomic fetch or
uint8_t cds_atomic_fetch_or8(void* addr, int atomic_index, uint8_t val) {
	_ATOMIC_RMW_( |= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_or16(void* addr, int atomic_index, uint16_t val) {
	_ATOMIC_RMW_( |= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_or32(void* addr, int atomic_index, uint32_t val) {
	_ATOMIC_RMW_( |= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_or64(void* addr, int atomic_index, uint64_t val) {
	_ATOMIC_RMW_( |= , 64, addr, atomic_index, val);
}

// cds atomic fetch xor
uint8_t cds_atomic_fetch_xor8(void* addr, int atomic_index, uint8_t val) {
	_ATOMIC_RMW_( ^= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_xor16(void* addr, int atomic_index, uint16_t val) {
	_ATOMIC_RMW_( ^= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_xor32(void* addr, int atomic_index, uint32_t val) {
	_ATOMIC_RMW_( ^= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_xor64(void* addr, int atomic_index, uint64_t val) {
	_ATOMIC_RMW_( ^= , 64, addr, atomic_index, val);
}
// cds atomic compare and exchange
// In order to accommodate the LLVM pass, the return value is the old value
// read from addr rather than a boolean success flag.

#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index)                            \
({                                                                                              \
  uint##size##_t _desired = desired;                                                            \
  uint##size##_t _expected = expected;                                                          \
  uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);                           \
  if (_old == _expected) {                                                                      \
    model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired); return _expected; }       \
  else {                                                                                        \
    model_rmwc_action_helper(addr, atomic_index); _expected = _old; return _old; }              \
})

// In the C11 API, expected is a pointer to the expected value, but the
// CmpOperand extracted from LLVM IR is an integer type, so expected is
// passed here by value instead.

uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
		uint8_t desired, int atomic_index_succ, int atomic_index_fail) {
	_ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ);
}
uint16_t cds_atomic_compare_exchange16(void* addr, uint16_t expected,
		uint16_t desired, int atomic_index_succ, int atomic_index_fail) {
	_ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ);
}
uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
		uint32_t desired, int atomic_index_succ, int atomic_index_fail) {
	_ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ);
}
uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
		uint64_t desired, int atomic_index_succ, int atomic_index_fail) {
	_ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ);
}
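
/*
 * Sketch of how instrumented code can recover the usual boolean result (an
 * assumption about the callers; `x', `e' and `d' are placeholders): on
 * success the functions above return `expected', on failure the old value,
 * so success is detected by comparing the return value against `expected'.
 * Note that the current implementation only uses the success memory order.
 *
 *   uint32_t old = cds_atomic_compare_exchange32(&x, e, d, 5, 5);
 *   bool success = (old == e);
 */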

// cds atomic thread fence

void cds_atomic_thread_fence(int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
}

/*
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);  \
		__typeof__(__e__) __q__ = (__e__);                            \
		__typeof__(__m__) __v__ = (__m__);                            \
		bool __r__;                                                   \
		__typeof__((__a__)->__f__) __t__ = (__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
		if (__t__ == *__q__) {                                        \
			model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
		else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false; } \
		__r__; })

#define _ATOMIC_FENCE_( __x__ ) \
	({ model_fence_action(__x__);})
*/

/*

#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);  \
	__typeof__((__a__)->__f__) __old__ = (__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
	__typeof__(__m__) __v__ = (__m__);                                    \
	__typeof__((__a__)->__f__) __copy__ = __old__;                        \
	__copy__ __o__ __v__;                                                 \
	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
	__old__ = __old__;  Silence clang (-Wunused-value)                    \
	})
*/