add support for function calls 'atomic_init' and 'atomic_compare_exchange()'
[c11tester.git] / cmodelint.cc
#include <stdio.h>
#include "model.h"
#include "action.h"
#include "cmodelint.h"
#include "threads-model.h"

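/* Map from the integer memory-order index passed in by instrumented code
 * (presumably following the C11/C++11 memory_order enumerator values,
 * relaxed = 0 through seq_cst = 5) to the corresponding memory_order. */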
memory_order orders[6] = {
	memory_order_relaxed, memory_order_consume, memory_order_acquire,
	memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};

/** Performs a read action. */
uint64_t model_read_action(void * obj, memory_order ord) {
	return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
}

/** Performs a write action. */
void model_write_action(void * obj, memory_order ord, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, ord, obj, val));
}

/** Performs an init action. */
void model_init_action(void * obj, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, val));
}

/**
 * Performs the read part of an RMW action. The next action must either be the
 * write part of the RMW action or an explicit close-out of the RMW action
 * without a write.
 */
uint64_t model_rmwr_action(void *obj, memory_order ord) {
	return model->switch_to_master(new ModelAction(ATOMIC_RMWR, ord, obj));
}

/** Performs the write part of an RMW action. */
void model_rmw_action(void *obj, memory_order ord, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_RMW, ord, obj, val));
}

/** Closes out an RMW action without doing a write. */
void model_rmwc_action(void *obj, memory_order ord) {
	model->switch_to_master(new ModelAction(ATOMIC_RMWC, ord, obj));
}
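
/*
 * Illustrative sketch, not part of the original interface: a compare-and-swap
 * expressed with the RMWR/RMW/RMWC protocol documented above. A successful
 * exchange issues the write part of the RMW; a failed one must close the RMW
 * out without writing. The helper name is invented for illustration.
 */
static inline uint64_t example_model_cas(void *obj, memory_order ord,
		uint64_t expected, uint64_t desired) {
	uint64_t old = model_rmwr_action(obj, ord);	/* read part */
	if (old == expected)
		model_rmw_action(obj, ord, desired);	/* write part */
	else
		model_rmwc_action(obj, ord);	/* close out without a write */
	return old;
}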

/** Issues a fence operation. */
void model_fence_action(memory_order ord) {
	model->switch_to_master(new ModelAction(ATOMIC_FENCE, ord, FENCE_LOCATION));
}

// --------------------- helper functions --------------------------------
uint64_t model_rmwr_action_helper(void *obj, int atomic_index) {
	return model->switch_to_master(new ModelAction(ATOMIC_RMWR, orders[atomic_index], obj));
}

void model_rmw_action_helper(void *obj, int atomic_index, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_RMW, orders[atomic_index], obj, val));
}

void model_rmwc_action_helper(void *obj, int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_RMWC, orders[atomic_index], obj));
}

void model_fence_action_helper(int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
}

// cds atomic inits
void cds_atomic_init8(void * obj, uint8_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, (uint64_t) val));
}
void cds_atomic_init16(void * obj, uint16_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, (uint64_t) val));
}
void cds_atomic_init32(void * obj, uint32_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, (uint64_t) val));
}
void cds_atomic_init64(void * obj, uint64_t val) {
	model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, val));
}


// cds atomic loads
uint8_t cds_atomic_load8(void * obj, int atomic_index) {
	return (uint8_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint16_t cds_atomic_load16(void * obj, int atomic_index) {
	return (uint16_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint32_t cds_atomic_load32(void * obj, int atomic_index) {
	return (uint32_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint64_t cds_atomic_load64(void * obj, int atomic_index) {
	return model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj));
}

// cds atomic stores
void cds_atomic_store8(void * obj, uint8_t val, int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store16(void * obj, uint16_t val, int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store32(void * obj, uint32_t val, int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store64(void * obj, uint64_t val, int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
}

/*
#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val )            \
({                                                                      \
  uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);   \
  uint##size##_t _copy = _old;                                          \
  _copy __op__ ( uint##size##_t ) _val;                                 \
  model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);        \
  return _old;                                                          \
})*/

#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val )            \
({                                                                      \
  uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);   \
  uint##size##_t _copy = _old;                                          \
  uint##size##_t _val = val;                                            \
  _copy __op__ _val;                                                    \
  model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);        \
  return _old;                                                          \
})
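
/*
 * For orientation (illustrative only): with __op__ `+=` and size 32, the
 * statement `_ATOMIC_RMW_( += , 32, addr, atomic_index, val);` in
 * cds_atomic_fetch_add32 below behaves like:
 *
 *   uint32_t _old = model_rmwr_action_helper(addr, atomic_index);   // read part
 *   uint32_t _copy = _old;
 *   uint32_t _val = val;
 *   _copy += _val;
 *   model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);  // write part
 *   return _old;                                                    // old value
 */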

// cds atomic exchange
uint8_t cds_atomic_exchange8(void* addr, uint8_t val, int atomic_index) {
	_ATOMIC_RMW_( = , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_exchange16(void* addr, uint16_t val, int atomic_index) {
	_ATOMIC_RMW_( = , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_exchange32(void* addr, uint32_t val, int atomic_index) {
	_ATOMIC_RMW_( = , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_exchange64(void* addr, uint64_t val, int atomic_index) {
	_ATOMIC_RMW_( = , 64, addr, atomic_index, val);
}

// cds atomic fetch add
uint8_t cds_atomic_fetch_add8(void* addr, uint8_t val, int atomic_index) {
	_ATOMIC_RMW_( += , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_add16(void* addr, uint16_t val, int atomic_index) {
	_ATOMIC_RMW_( += , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_add32(void* addr, uint32_t val, int atomic_index) {
	_ATOMIC_RMW_( += , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_add64(void* addr, uint64_t val, int atomic_index) {
	_ATOMIC_RMW_( += , 64, addr, atomic_index, val);
}

// cds atomic fetch sub
uint8_t cds_atomic_fetch_sub8(void* addr, uint8_t val, int atomic_index) {
	_ATOMIC_RMW_( -= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_sub16(void* addr, uint16_t val, int atomic_index) {
	_ATOMIC_RMW_( -= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_sub32(void* addr, uint32_t val, int atomic_index) {
	_ATOMIC_RMW_( -= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_sub64(void* addr, uint64_t val, int atomic_index) {
	_ATOMIC_RMW_( -= , 64, addr, atomic_index, val);
}

// cds atomic fetch and
uint8_t cds_atomic_fetch_and8(void* addr, uint8_t val, int atomic_index) {
	_ATOMIC_RMW_( &= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_and16(void* addr, uint16_t val, int atomic_index) {
	_ATOMIC_RMW_( &= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_and32(void* addr, uint32_t val, int atomic_index) {
	_ATOMIC_RMW_( &= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_and64(void* addr, uint64_t val, int atomic_index) {
	_ATOMIC_RMW_( &= , 64, addr, atomic_index, val);
}

// cds atomic fetch or
uint8_t cds_atomic_fetch_or8(void* addr, uint8_t val, int atomic_index) {
	_ATOMIC_RMW_( |= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_or16(void* addr, uint16_t val, int atomic_index) {
	_ATOMIC_RMW_( |= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_or32(void* addr, uint32_t val, int atomic_index) {
	_ATOMIC_RMW_( |= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_or64(void* addr, uint64_t val, int atomic_index) {
	_ATOMIC_RMW_( |= , 64, addr, atomic_index, val);
}

// cds atomic fetch xor
uint8_t cds_atomic_fetch_xor8(void* addr, uint8_t val, int atomic_index) {
	_ATOMIC_RMW_( ^= , 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_xor16(void* addr, uint16_t val, int atomic_index) {
	_ATOMIC_RMW_( ^= , 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_xor32(void* addr, uint32_t val, int atomic_index) {
	_ATOMIC_RMW_( ^= , 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_xor64(void* addr, uint64_t val, int atomic_index) {
	_ATOMIC_RMW_( ^= , 64, addr, atomic_index, val);
}

// cds atomic compare and exchange
// To accommodate the LLVM pass, the return value is the old value read from
// the address rather than true or false.

#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index)                            \
({                                                                                              \
  uint##size##_t _desired = desired;                                                            \
  uint##size##_t _expected = expected;                                                          \
  uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index);                           \
  if (_old == _expected) {                                                                      \
    model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired); return _expected; }       \
  else {                                                                                        \
    model_rmwc_action_helper(addr, atomic_index); _expected = _old; return _old; }              \
})
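
/*
 * Illustrative expansion (not part of the original file): inside
 * cds_atomic_compare_exchange32_v1 below, the macro behaves like:
 *
 *   uint32_t _old = model_rmwr_action_helper(addr, atomic_index_succ);
 *   if (_old == expected) {
 *           model_rmw_action_helper(addr, atomic_index_succ, (uint64_t) desired);
 *           return expected;   // success: the old value equals expected
 *   } else {
 *           model_rmwc_action_helper(addr, atomic_index_succ);
 *           return _old;       // failure: the old value differs from expected
 *   }
 *
 * Note that atomic_index_fail is currently unused: the success ordering is
 * applied to the read even when the comparison fails.
 */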

// atomic_compare_exchange version 1: the CmpOperand (corresponding to
// `expected`) extracted from the LLVM IR is an integer type.

uint8_t cds_atomic_compare_exchange8_v1(void* addr, uint8_t expected,
		uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
	_ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
}
uint16_t cds_atomic_compare_exchange16_v1(void* addr, uint16_t expected,
		uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
	_ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
}
uint32_t cds_atomic_compare_exchange32_v1(void* addr, uint32_t expected,
		uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
	_ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
}
uint64_t cds_atomic_compare_exchange64_v1(void* addr, uint64_t expected,
		uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
	_ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
}

// atomic_compare_exchange version 2: `expected` is passed by pointer, and the
// return value is a bool indicating whether the exchange happened.
bool cds_atomic_compare_exchange8_v2(void* addr, uint8_t* expected,
		uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
	uint8_t ret = cds_atomic_compare_exchange8_v1(addr, *expected,
				desired, atomic_index_succ, atomic_index_fail );
	return ret == *expected;
}
bool cds_atomic_compare_exchange16_v2(void* addr, uint16_t* expected,
		uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
	uint16_t ret = cds_atomic_compare_exchange16_v1(addr, *expected,
				desired, atomic_index_succ, atomic_index_fail );
	return ret == *expected;
}
bool cds_atomic_compare_exchange32_v2(void* addr, uint32_t* expected,
		uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
	uint32_t ret = cds_atomic_compare_exchange32_v1(addr, *expected,
				desired, atomic_index_succ, atomic_index_fail );
	return ret == *expected;
}
bool cds_atomic_compare_exchange64_v2(void* addr, uint64_t* expected,
		uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
	uint64_t ret = cds_atomic_compare_exchange64_v1(addr, *expected,
				desired, atomic_index_succ, atomic_index_fail );
	return ret == *expected;
}
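
/*
 * Hypothetical usage sketch (the helper name is invented, not part of the
 * original interface): a bool-returning strong CAS on a 32-bit location via
 * the v2 entry point, using index 5 (memory_order_seq_cst) for both the
 * success and failure orderings. Note that, unlike C11
 * atomic_compare_exchange_strong, the v2 variants above do not write the
 * observed value back through `expected` on failure.
 */
static inline bool example_cas32_seq_cst(void *addr, uint32_t expected,
		uint32_t desired) {
	return cds_atomic_compare_exchange32_v2(addr, &expected, desired, 5, 5);
}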


// cds atomic thread fence

void cds_atomic_thread_fence(int atomic_index) {
	model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
}

/*
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
		__typeof__(__e__) __q__ = (__e__);                            \
		__typeof__(__m__) __v__ = (__m__);                            \
		bool __r__;                                                   \
		__typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
		if (__t__ == * __q__ ) {                                      \
			model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
		else {  model_rmwc_action((void *)__p__, __x__); *__q__ = __t__;  __r__ = false;} \
		__r__; })

#define _ATOMIC_FENCE_( __x__ ) \
	({ model_fence_action(__x__);})
*/

/*

#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
	__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
	__typeof__(__m__) __v__ = (__m__);                                    \
	__typeof__((__a__)->__f__) __copy__= __old__;                         \
	__copy__ __o__ __v__;                                                 \
	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
	__old__ = __old__;  Silence clang (-Wunused-value)                    \
	 })
*/