1f7059f828004926926c0b0682b52cf909804c9d
[cdsspec-compiler.git] / benchmark / linuxrwlocks / linuxrwlocks.c
1 #include <stdio.h>
2 #include <threads.h>
3 #include <stdatomic.h>
4
5 #include <spec_lib.h>
6 #include <stdlib.h>
7 #include <cdsannotate.h>
8 #include <specannotation.h>
9 #include <model_memory.h>
10 #include "common.h"
11
12 #include "librace.h"
13
/* A free lock holds the full bias.  Each reader subtracts 1 from the
   counter; a writer subtracts the entire bias.  Hence the counter is
   positive iff a reader may enter, and equals RW_LOCK_BIAS iff the lock
   is completely free (no readers, no writer). */
#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
        atomic_int lock;        /* biased reader/writer counter */
} rwlock_t;
20
21 /** Example implementation of linux rw lock along with 2 thread test
22  *  driver... */
23
24 /**
25         Properties to check:
26         1. At most 1 thread can acquire the write lock, and at the same time, no
27                 other threads can acquire any lock (including read/write lock).
28         2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
29         3. A read_unlock release 1 read lock, and a write_unlock release the write
30         lock. They can not release a lock that they don't acquire.
31         ###
32         4. Read_lock and write_lock can not be grabbed at the same time.
        5. Happens-before relationships should be checked and guaranteed; they
        should be as follows:
35                 a. read_unlock hb-> write_lock
36                 b. write_unlock hb-> write_lock
37                 c. write_unlock hb-> read_lock
38 */
39
40 /**
41         Interesting point for locks:
        a. If the users call unlock() before any lock(), then the model checker will
        fail. In that case we cannot say that the data structure is buggy, so how
        can we distinguish such misuse from a real data structure bug?
        b. We should specify that, for a specific thread, successful locks and
        unlocks always come in pairs. We could make this an auxiliary check,
        since it is an extra rule for how the interfaces should be called.
48 */
49
50 /**
51         @Begin
52         @Options:
53                 LANG = C;
54         @Global_define:
55                 @DeclareVar:
56                         bool writer_lock_acquired;
57                         int reader_lock_cnt;
58                 @InitVar:
59                         writer_lock_acquired = false;
60                         reader_lock_cnt = 0;
61         @Happens_before:
62                 # Since commit_point_set has no ID attached, A -> B means that for any B,
63                 # the previous A happens before B.
64                 #Read_Unlock -> Read_Lock
65                 Read_Unlock -> Write_Lock
66                 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
67                 #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
68
69                 Write_Unlock -> Write_Lock
70                 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
71
72                 Write_Unlock -> Read_Lock
73                 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
74         
75         @Commutativity: Read_Lock <-> Read_Lock: true
76         @Commutativity: Read_Lock <-> Read_Unlock: true
77         @Commutativity: Read_Lock <-> Read_Trylock: true
78         @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
79         
80         @Commutativity: Read_Unlock <-> Read_Unlock: true
81         @Commutativity: Read_Unlock <-> Read_Trylock: true
82         @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
83
84         @Commutativity: Read_Trylock <-> Read_Trylock: true
85         @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
86         @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
87         @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
88
89         @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
90         @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
91         @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
92         @End
93 */
94
95 static inline int read_can_lock(rwlock_t *lock)
96 {
97         return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
98 }
99
100 static inline int write_can_lock(rwlock_t *lock)
101 {
102         return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
103 }
104
105
106 /**
107         @Begin
108         @Interface: Read_Lock
109         @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
110         @Check: !writer_lock_acquired
111         @Action: reader_lock_cnt++;
112         @End
113 */
static inline void read_lock(rwlock_t *rw)
{
        /* Blocking reader acquire: optimistically take a reader slot by
           decrementing the counter.  A positive prior value means no
           writer held the lock.  The acquire ordering pairs with the
           release in the unlock paths. */
        /**********  Admissibility  **********/
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue > 0
                @Label:Read_Lock_Success_1
                @End
        */
        while (priorvalue <= 0) {
                /* A writer held (or was contending for) the lock: undo the
                   provisional decrement before spinning. */
                atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
                /* Spin (yielding to the scheduler) until the counter looks
                   positive again, then retry the decrement. */
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
                        thrd_yield();
                }
                /**********  Admissibility  **********/
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue > 0
                        @Label:Read_Lock_Success_2
                        @End
                */
        }
}
140
141
142 /**
143         @Begin
144         @Interface: Write_Lock
145         @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
146         @Check: !writer_lock_acquired && reader_lock_cnt == 0
147         @Action: writer_lock_acquired = true;
148         @End
149 */
static inline void write_lock(rwlock_t *rw)
{
        /* Blocking writer acquire: subtract the whole bias.  A prior
           value of exactly RW_LOCK_BIAS means the lock was completely
           free (no readers, no writer).  Acquire ordering pairs with the
           release in the unlock paths. */
        /**********  Admissibility  **********/
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                @Label: Write_Lock_Success_1
                @End
        */
        while (priorvalue != RW_LOCK_BIAS) {
                /* Lock was not free: restore the bias before spinning. */
                atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
                /* Spin (yielding) until the lock looks completely free,
                   then retry the full-bias subtraction. */
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
                        thrd_yield();
                }
                /**********  Admissibility  **********/
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                        @Label: Write_Lock_Success_2
                        @End
                */
        }
}
175
176 /**
177         @Begin
178         @Interface: Read_Trylock
179         @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
180         //@Condition: writer_lock_acquired == false
181         @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
182         @Action:
183                 if (__RET__)
184                         reader_lock_cnt++;
185         @Post_check: __RET__ == !writer_lock_acquired || !__RET__
186         @End
187 */
static inline int read_trylock(rwlock_t *rw)
{
        /* Non-blocking reader acquire: optimistically take a reader slot;
           succeed iff the prior counter value was positive (no writer).
           Returns 1 on success, 0 on failure. */
        /**********  Admissibility  **********/
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Read_Trylock_Point
                @End
        */
        if (priorvalue > 0) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Read_Trylock_Point
                        @Label:  Read_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Read_Trylock_Point
                @Label:  Read_Trylock_Fail_Point
                @End
        */
        /* Failed: undo the provisional decrement and report failure. */
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
        return 0;
}
218
219 /**
220         @Begin
221         @Interface: Write_Trylock
222         @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
223         @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
224         @Action:
225                 if (__RET__ == 1)
226                         writer_lock_acquired = true;
227         @End
228 */
static inline int write_trylock(rwlock_t *rw)
{
        /* Non-blocking writer acquire: subtract the whole bias; succeed
           iff the prior value was exactly RW_LOCK_BIAS (lock completely
           free).  Returns 1 on success, 0 on failure. */
        /**********  Admissibility  **********/
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        //model_print("write_trylock: priorvalue: %d\n", priorvalue);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Write_Trylock_Point
                @End
        */
        if (priorvalue == RW_LOCK_BIAS) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Write_Trylock_Point
                        @Label: Write_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Write_Trylock_Point
                @Label: Write_Trylock_Fail_Point
                @End
        */
        /* Failed: restore the bias and report failure. */
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
        return 0;
}
260
261 /**
262         @Begin
263         @Interface: Read_Unlock
264         @Commit_point_set: Read_Unlock_Point
265         @Check: reader_lock_cnt > 0 && !writer_lock_acquired
266         @Action: reader_lock_cnt--;
267         @End
268 */
static inline void read_unlock(rwlock_t *rw)
{
        /* Give back one reader slot.  Release ordering pairs with the
           acquire in the lock/trylock paths, establishing the
           read_unlock -> write_lock happens-before edge. */
        /**********  Admissibility  **********/
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Read_Unlock_Point
                @End
        */
}
280
281 /**
282         @Begin
283         @Interface: Write_Unlock
284         @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
285         @Check: reader_lock_cnt == 0 && writer_lock_acquired
286         @Action: writer_lock_acquired = false;
287         @End
288 */
static inline void write_unlock(rwlock_t *rw)
{
        /* Restore the full bias, releasing the write lock.  Release
           ordering pairs with the acquire in the lock/trylock paths. */
        /**********  Admissibility  **********/
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Write_Unlock_Point
                @End
        */

        /* Deliberately disabled annotation block (note the //@Begin);
           Write_Unlock_Clear is referenced by the commit-point set above
           but this clear point is currently turned off. */
        /**
                //@Begin
                @Commit_point_clear: true
                @Label: Write_Unlock_Clear
                @End
        */
}
307
/* Lock under test; its counter is initialized to RW_LOCK_BIAS in user_main. */
rwlock_t mylock;
/* Shared variable the driver threads guard with mylock. */
int shareddata;
310
311 static void a(void *obj)
312 {
313         read_lock(&mylock);
314         //load_32(&shareddata);
315         //printf("%d\n", shareddata);
316         read_unlock(&mylock);
317         
318         write_lock(&mylock);
319         //store_32(&shareddata,(unsigned int)i);
320         shareddata = 47;
321         write_unlock(&mylock);
322 }
323
324 static void b(void *obj)
325 {
326         if (read_trylock(&mylock) == 1) {
327                 //printf("%d\n", shareddata);
328                 read_unlock(&mylock);
329         }
330         
331         if (write_trylock(&mylock) == 1) {
332                 //shareddata = 47;
333                 write_unlock(&mylock);
334         }
335 }
336
int user_main(int argc, char **argv)
{
        /**
                @Begin
                @Entry_point
                @End
        */
        thrd_t t1, t2;
        /* Counter starts at the full bias: no readers, no writer. */
        atomic_init(&mylock.lock, RW_LOCK_BIAS);

        /* Run the blocking-lock driver and the trylock driver concurrently.
           NOTE(review): thrd_create/thrd_join here match the model
           checker's threads.h (thrd_join takes no result out-param),
           not the ISO C11 signatures — confirm against that header. */
        thrd_create(&t1, (thrd_start_t)&a, NULL);
        thrd_create(&t2, (thrd_start_t)&b, NULL);

        thrd_join(t1);
        thrd_join(t2);

        return 0;
}