#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>
#include "common.h"

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS
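/*
 * Lock state encoding: the counter starts at RW_LOCK_BIAS. Each reader
 * subtracts 1 while it holds the lock, so a value of RW_LOCK_BIAS - n means
 * n readers are inside. A writer subtracts the entire bias, taking the
 * counter to 0. Hence RW_LOCK_BIAS means the lock is free, and a positive
 * value means at most readers hold it.
 */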
typedef union {
        atomic_int lock;
} rwlock_t;
/** Example implementation of the Linux rwlock along with a 2-thread test
 *  driver. */

/**
        Properties to check:
        1. At most one thread can hold the write lock, and while it does, no
                other thread can acquire any lock (read or write).
        2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
        3. A read_unlock releases one read lock, and a write_unlock releases the
        write lock. They cannot release a lock that they did not acquire.
        ###
        4. The read lock and the write lock cannot be held at the same time.
        5. The following happens-before relationships should be checked and
        guaranteed:
                a. read_unlock hb-> write_lock
                b. write_unlock hb-> write_lock
                c. write_unlock hb-> read_lock
*/

/**
        Interesting points for locks:
        a. If a user calls unlock() before any lock(), the model checker will fail.
        In that case we cannot say that the data structure itself is buggy; how do
        we distinguish such misuse from a real data structure bug?
        b. We should specify that, within a single thread, successful locks and
        unlocks always come in pairs. We could make this an auxiliary check, since
        it is an extra rule for how the interfaces should be called.
*/
/**
        @Begin
        @Options:
                LANG = C;
        @Global_define:
                @DeclareVar:
                        bool writer_lock_acquired;
                        int reader_lock_cnt;
                @InitVar:
                        writer_lock_acquired = false;
                        reader_lock_cnt = 0;
        @Happens_before:
                # Since commit_point_set has no ID attached, A -> B means that for any B,
                # the previous A happens before B.
                Write_Unlock -> Write_Lock
                Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

                Write_Unlock -> Read_Lock
                Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
        @End
*/

static inline int read_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}


/**
        @Begin
        @Interface: Read_Lock
        @Commit_point_set:
                Read_Lock_Success_1 | Read_Lock_Success_2
        @Check:
                !writer_lock_acquired
        @Action:
                reader_lock_cnt++;
        @End
*/
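/*
 * read_lock: optimistically subtract 1 (acquire). If the prior value was
 * positive, no writer held the lock and the read lock is taken. Otherwise
 * undo the decrement, spin (yielding) until the counter becomes positive
 * again, and retry.
 */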
static inline void read_lock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue > 0
                @Label: Read_Lock_Success_1
                @End
        */
        while (priorvalue <= 0) {
                atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue > 0
                        @Label: Read_Lock_Success_2
                        @End
                */
        }
}


/**
        @Begin
        @Interface: Write_Lock
        @Commit_point_set:
                Write_Lock_Success_1 | Write_Lock_Success_2
        @Check:
                !writer_lock_acquired && reader_lock_cnt == 0
        @Action:
                writer_lock_acquired = true;
        @End
*/
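/*
 * write_lock: optimistically subtract the whole bias (acquire). If the prior
 * value was exactly RW_LOCK_BIAS, the lock was free and is now write-held.
 * Otherwise undo the subtraction, spin (yielding) until the counter returns
 * to RW_LOCK_BIAS, and retry.
 */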
static inline void write_lock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                @Label: Write_Lock_Success_1
                @End
        */
        while (priorvalue != RW_LOCK_BIAS) {
                atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                        @Label: Write_Lock_Success_2
                        @End
                */
        }
}

/**
        @Begin
        @Interface: Read_Trylock
        @Commit_point_set:
                Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
        @Condition:
                writer_lock_acquired == false
        @HB_condition:
                HB_Read_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        reader_lock_cnt++;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
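/*
 * read_trylock: subtract 1 and check the prior value. A positive prior value
 * means the read lock was acquired and 1 is returned; otherwise the decrement
 * is rolled back and 0 is returned without blocking.
 */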
static inline int read_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Read_Trylock_Point
                @End
        */
        if (priorvalue > 0) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Read_Trylock_Point
                        @Label: Read_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Read_Trylock_Point
                @Label: Read_Trylock_Fail_Point
                @End
        */
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Write_Trylock
        @Commit_point_set:
                Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
        @Condition:
                !writer_lock_acquired && reader_lock_cnt == 0
        @HB_condition:
                HB_Write_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        writer_lock_acquired = true;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
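/*
 * write_trylock: subtract the bias and check the prior value. A prior value
 * of RW_LOCK_BIAS means the write lock was acquired and 1 is returned;
 * otherwise the subtraction is rolled back and 0 is returned without blocking.
 */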
static inline int write_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Write_Trylock_Point
                @End
        */
        if (priorvalue == RW_LOCK_BIAS) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Write_Trylock_Point
                        @Label: Write_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Write_Trylock_Point
                @Label: Write_Trylock_Fail_Point
                @End
        */
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Read_Unlock
        @Commit_point_set: Read_Unlock_Point
        @Check:
                reader_lock_cnt > 0 && !writer_lock_acquired
        @Action:
                reader_lock_cnt--;
        @End
*/
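/*
 * read_unlock: add 1 back with release ordering so the reader's critical
 * section happens before a later write_lock acquisition (property 5a above).
 */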
static inline void read_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Read_Unlock_Point
                @End
        */
}

/**
        @Begin
        @Interface: Write_Unlock
        @Commit_point_set: Write_Unlock_Point
        @Check:
                reader_lock_cnt == 0 && writer_lock_acquired
        @Action:
                writer_lock_acquired = false;
        @End
*/
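/*
 * write_unlock: add the bias back with release ordering so the writer's
 * critical section happens before later lock acquisitions (properties 5b/5c).
 */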
static inline void write_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Write_Unlock_Point
                @End
        */
}

rwlock_t mylock;
int shareddata;
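/*
 * Test driver: thread a alternates blocking read and write acquisitions of
 * mylock; thread b does the same with the trylock variants, entering the
 * critical section only when the trylock succeeds.
 */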
static void a(void *obj)
{
        int i;
        for(i = 0; i < 2; i++) {
                if ((i % 2) == 0) {
                        read_lock(&mylock);
                        //load_32(&shareddata);
                        printf("%d\n", shareddata);
                        read_unlock(&mylock);
                } else {
                        write_lock(&mylock);
                        //store_32(&shareddata,(unsigned int)i);
                        shareddata = 47;
                        write_unlock(&mylock);
                }
        }
}

static void b(void *obj)
{
        int i;
        for(i = 0; i < 2; i++) {
                if ((i % 2) == 0) {
                        if (read_trylock(&mylock) == 1) {
                                printf("%d\n", shareddata);
                                read_unlock(&mylock);
                        }
                } else {
                        if (write_trylock(&mylock) == 1) {
                                shareddata = 47;
                                write_unlock(&mylock);
                        }
                }
        }
}
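/*
 * Entry point for the model checker: initialize the lock to RW_LOCK_BIAS
 * (unlocked), spawn the two test threads, and join them.
 */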
int user_main(int argc, char **argv)
{
        /**
                @Begin
                @Entry_point
                @End
        */
        thrd_t t1, t2;
        atomic_init(&mylock.lock, RW_LOCK_BIAS);

        thrd_create(&t1, (thrd_start_t)&a, NULL);
        thrd_create(&t2, (thrd_start_t)&b, NULL);

        thrd_join(t1);
        thrd_join(t2);

        return 0;
}