/* cdsspec-compiler benchmark: linuxrwlocks/testcase2.c */
1 #include <stdio.h>
2 #include <threads.h>
3 #include <stdatomic.h>
4
5 #include <spec_lib.h>
6 #include <stdlib.h>
7 #include <cdsannotate.h>
8 #include <specannotation.h>
9 #include <model_memory.h>
10 #include "common.h"
11
12 #include "librace.h"
13
/* Counting rwlock in the Linux style: the counter starts at RW_LOCK_BIAS.
 * Each reader subtracts 1; a writer subtracts the whole bias, so the lock
 * is completely free iff the counter equals RW_LOCK_BIAS, and any value
 * <= 0 means a writer holds (or is contending for) the lock. */
#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
	atomic_int lock; /* remaining bias; see invariant above */
} rwlock_t;
20
21 /** Example implementation of linux rw lock along with 2 thread test
22  *  driver... */
23
24 /**
25         Properties to check:
26         1. At most 1 thread can acquire the write lock, and at the same time, no
27                 other threads can acquire any lock (including read/write lock).
28         2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
29         3. A read_unlock release 1 read lock, and a write_unlock release the write
30         lock. They can not release a lock that they don't acquire.
31         ###
32         4. Read_lock and write_lock can not be grabbed at the same time.
	5. Happens-before relationships should be checked and guaranteed, which
34         should be as the following:
35                 a. read_unlock hb-> write_lock
36                 b. write_unlock hb-> write_lock
37                 c. write_unlock hb-> read_lock
38 */
39
/**
	Interesting points for locks:
	a. If users call unlock() before any lock(), the model checker will fail.
	In that case we cannot say the data structure itself is buggy — how do we
	distinguish such misuse from a real data-structure bug?
	b. We should specify that, within a specific thread, successful locks and
	unlocks always come in pairs. We could make this an auxiliary check, since
	it is an extra rule about how the interfaces should be called.
*/
49
50 /**
51         @Begin
52         @Options:
53                 LANG = C;
54         @Global_define:
55                 @DeclareVar:
56                         bool writer_lock_acquired;
57                         int reader_lock_cnt;
58                 @InitVar:
59                         writer_lock_acquired = false;
60                         reader_lock_cnt = 0;
61         @Happens_before:
62                 # Since commit_point_set has no ID attached, A -> B means that for any B,
63                 # the previous A happens before B.
64                 #Read_Unlock -> Read_Lock
65                 Read_Unlock -> Write_Lock
66                 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
67                 #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
68
69                 Write_Unlock -> Write_Lock
70                 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
71
72                 Write_Unlock -> Read_Lock
73                 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
74         
75         @Commutativity: Read_Lock <-> Read_Lock: true
76         @Commutativity: Read_Lock <-> Read_Unlock: true
77         @Commutativity: Read_Lock <-> Read_Trylock: true
78         @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
79         
80         @Commutativity: Read_Unlock <-> Read_Unlock: true
81         @Commutativity: Read_Unlock <-> Read_Trylock: true
82         @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
83
84         @Commutativity: Read_Trylock <-> Read_Trylock: true
85         @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
86         @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
87         @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
88
89         @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
90         @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
91         @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
92         @End
93 */
94
95 static inline int read_can_lock(rwlock_t *lock)
96 {
97         return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
98 }
99
100 static inline int write_can_lock(rwlock_t *lock)
101 {
102         return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
103 }
104
105
/**
	@Begin
	@Interface: Read_Lock
	@Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
	@Check: !writer_lock_acquired
	@Action: reader_lock_cnt++;
	@End
*/
/* Blocking read-lock: optimistically take one unit of the bias; if the
 * prior value shows a writer (<= 0), back the decrement out, spin until
 * the counter looks positive, and retry. */
static inline void read_lock(rwlock_t *rw)
{
	/* acquire: synchronizes with the release in read/write_unlock */
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue > 0
		@Label:Read_Lock_Success_1
		@End
	*/
	while (priorvalue <= 0) {
		/* failed: restore the unit taken by the optimistic decrement */
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue > 0
			@Label:Read_Lock_Success_2
			@End
		*/
	}
}
137
138
/**
	@Begin
	@Interface: Write_Lock
	@Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
	@Check: !writer_lock_acquired && reader_lock_cnt == 0
	@Action: writer_lock_acquired = true;
	@End
*/
/* Blocking write-lock: subtract the full bias; success iff the prior
 * value was exactly RW_LOCK_BIAS (no readers, no writer).  On failure,
 * restore the bias and spin until the lock looks fully free, then retry. */
static inline void write_lock(rwlock_t *rw)
{
	/* acquire: synchronizes with the release in read/write_unlock */
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
		@Label: Write_Lock_Success_1
		@End
	*/
	while (priorvalue != RW_LOCK_BIAS) {
		/* failed: give back the bias taken by the optimistic subtract */
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
			@Label: Write_Lock_Success_2
			@End
		*/
	}
}
170
/**
	@Begin
	@Interface: Read_Trylock
	@Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
	//@Condition: writer_lock_acquired == false
	@HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__RET__)
			reader_lock_cnt++;
	@Post_check: __RET__ == !writer_lock_acquired || !__RET__
	@End
*/
/* Non-blocking read-lock attempt.  Returns 1 on success, 0 on failure.
 * Optimistically decrements the counter; if a writer held the lock
 * (prior value <= 0), the decrement is undone and 0 is returned. */
static inline int read_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Read_Trylock_Point
		@End
	*/
	if (priorvalue > 0) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Read_Trylock_Point
			@Label:  Read_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Read_Trylock_Point
		@Label:  Read_Trylock_Fail_Point
		@End
	*/
	/* failed: restore the unit taken by the optimistic decrement */
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}
212
/**
	@Begin
	@Interface: Write_Trylock
	@Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
	@HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__RET__ == 1)
			writer_lock_acquired = true;
	@End
*/
/* Non-blocking write-lock attempt.  Returns 1 on success, 0 on failure.
 * Optimistically subtracts the full bias; success iff the prior value was
 * exactly RW_LOCK_BIAS, otherwise the bias is restored. */
static inline int write_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	//model_print("write_trylock: priorvalue: %d\n", priorvalue);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Write_Trylock_Point
		@End
	*/
	if (priorvalue == RW_LOCK_BIAS) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Write_Trylock_Point
			@Label: Write_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Write_Trylock_Point
		@Label: Write_Trylock_Fail_Point
		@End
	*/
	/* failed: give back the bias taken by the optimistic subtract */
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}
253
/**
	@Begin
	@Interface: Read_Unlock
	@Commit_point_set: Read_Unlock_Point
	@Check: reader_lock_cnt > 0 && !writer_lock_acquired
	@Action: reader_lock_cnt--;
	@End
*/
/* Release one read lock by returning one unit to the counter.  Release
 * ordering publishes the reader's critical section to later acquirers. */
static inline void read_unlock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Unlock_Point
		@End
	*/
}
273
/**
	@Begin
	@Interface: Write_Unlock
	@Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
	@Check: reader_lock_cnt == 0 && writer_lock_acquired
	@Action: writer_lock_acquired = false;
	@End
*/
/* Release the write lock by restoring the full bias.  Release ordering
 * publishes the writer's critical section to later acquirers. */
static inline void write_unlock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Unlock_Point
		@End
	*/

	/* NOTE(review): the annotation below starts with "//@Begin", so the
	   Write_Unlock_Clear label referenced by @Commit_point_set above is
	   never actually defined — confirm whether this commit point was
	   disabled on purpose or the "//" is a leftover. */
	/**
		//@Begin
		@Commit_point_clear: true
		@Label: Write_Unlock_Clear
		@End
	*/
}
300
/* Shared test state: the lock under test and the data it protects. */
rwlock_t mylock;
int shareddata;
303
304 static void a(void *obj)
305 {
306         write_lock(&mylock);
307         //store_32(&shareddata,(unsigned int)i);
308         shareddata = 47;
309         write_unlock(&mylock);
310 }
311
312 static void b(void *obj)
313 {
314         if (write_trylock(&mylock)) {
315                 //store_32(&shareddata,(unsigned int)i);
316                 shareddata = 47;
317                 write_unlock(&mylock);
318         }
319
320         read_lock(&mylock);
321         //load_32(&shareddata);
322         //printf("%d\n", shareddata);
323         read_unlock(&mylock);
324 }
325
/* Test driver: initialize the lock to its free state (full bias), run a
 * writer thread (a) and a mixed writer/reader thread (b), join both. */
int user_main(int argc, char **argv)
{
	/**
		@Begin
		@Entry_point
		@End
	*/
	thrd_t t1, t2;
	/* counter == RW_LOCK_BIAS means the lock is completely free */
	atomic_init(&mylock.lock, RW_LOCK_BIAS);

	thrd_create(&t1, (thrd_start_t)&a, NULL);
	thrd_create(&t2, (thrd_start_t)&b, NULL);

	thrd_join(t1);
	thrd_join(t2);

	return 0;
}