benchmark/linuxrwlocks/linuxrwlocks.c
#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
	atomic_int lock;
} rwlock_t;
/** Example implementation of the Linux reader-writer lock along with a
 *  two-thread test driver. */

/**
	Properties to check:
	1. At most one thread can hold the write lock, and while it is held no
		other thread can acquire any lock (read or write).
	2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
	3. A read_unlock releases one read lock, and a write_unlock releases the
	write lock. Neither can release a lock that was not acquired.
	4. The read lock and the write lock can not be held at the same time.
	5. The following happens-before relationships should be checked and
	guaranteed:
		a. read_unlock hb-> write_lock
		b. write_unlock hb-> write_lock
		c. write_unlock hb-> read_lock
*/

/**
	Interesting points for locks:
	a. If a user calls unlock() before any lock(), the model checker will report
	a failure. In that case we can not say the data structure itself is buggy,
	so how do we distinguish such a misuse from a real data-structure bug?
	b. We should specify that, within a single thread, successful locks and
	unlocks always come in pairs. This could be an auxiliary check, since it is
	an extra rule about how the interfaces should be called. A usage sketch
	follows below.
*/
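
/*
 * Usage sketch (illustrative only, not part of the benchmark): the pairing
 * rule above expects each successful lock to be matched by exactly one
 * unlock in the same thread, e.g.
 *
 *     read_lock(&mylock);
 *     load_32(&shareddata);            // read-side critical section
 *     read_unlock(&mylock);
 *
 *     write_lock(&mylock);
 *     store_32(&shareddata, 1);        // write-side critical section
 *     write_unlock(&mylock);
 *
 * Calling read_unlock()/write_unlock() without a matching prior lock would
 * trip the @Check conditions below rather than indicate a bug in the lock.
 */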

/**
	@Begin
	@Options:
		LANG = C;
	@Global_define:
		@DeclareVar:
			bool writer_lock_acquired;
			int reader_lock_cnt;
		@InitVar:
			writer_lock_acquired = false;
			reader_lock_cnt = 0;
	@Happens_before:
		# Since the commit_point_set has no ID attached, A -> B means that
		# for any B, any A that precedes it happens before B.
		Write_Unlock -> Write_Lock
		Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

		Write_Unlock -> Read_Lock
		Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
	@End
*/

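/*
 * The lock word encodes both modes in a single counter: it starts at
 * RW_LOCK_BIAS, each reader subtracts 1, and a writer subtracts the entire
 * bias. A positive value therefore means another reader could still get in.
 */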
static inline int read_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

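/* The write lock is available only when no reader or writer holds the lock,
 * i.e. the counter still has its full initial bias. */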
static inline int write_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}


/**
	@Begin
	@Interface: Read_Lock
	@Commit_point_set:
		Read_Lock_Success_1 | Read_Lock_Success_2
	@Check:
		!writer_lock_acquired
	@Action:
		reader_lock_cnt++;
	@End
*/
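/*
 * read_lock: optimistically subtract one from the counter; if the prior
 * value was positive the read lock is held. Otherwise a writer owns the
 * lock, so undo the decrement, spin until the counter turns positive again,
 * and retry.
 */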
static inline void read_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue > 0
		@Label: Read_Lock_Success_1
		@End
	*/
	while (priorvalue <= 0) {
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue > 0
			@Label: Read_Lock_Success_2
			@End
		*/
	}
}


/**
	@Begin
	@Interface: Write_Lock
	@Commit_point_set:
		Write_Lock_Success_1 | Write_Lock_Success_2
	@Check:
		!writer_lock_acquired && reader_lock_cnt == 0
	@Action:
		writer_lock_acquired = true;
	@End
*/
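/*
 * write_lock: subtract the full bias; if the prior value was exactly
 * RW_LOCK_BIAS the lock was free and the writer now owns it. Otherwise put
 * the bias back, spin until the counter returns to RW_LOCK_BIAS, and retry.
 */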
static inline void write_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
		@Label: Write_Lock_Success_1
		@End
	*/
	while (priorvalue != RW_LOCK_BIAS) {
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
			@Label: Write_Lock_Success_2
			@End
		*/
	}
}

/**
	@Begin
	@Interface: Read_Trylock
	@Commit_point_set:
		Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
	@Condition:
		writer_lock_acquired == false
	@HB_condition:
		HB_Read_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__COND_SAT__)
			reader_lock_cnt++;
	@Post_check:
		__COND_SAT__ ? __RET__ == 1 : __RET__ == 0
	@End
*/
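/*
 * read_trylock: perform the same optimistic decrement as read_lock, but
 * instead of spinning, undo the decrement and return 0 if a writer holds
 * the lock; return 1 on success.
 */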
static inline int read_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Read_Trylock_Point
		@End
	*/
	if (priorvalue > 0) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Read_Trylock_Point
			@Label: Read_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Read_Trylock_Point
		@Label: Read_Trylock_Fail_Point
		@End
	*/
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}

/**
	@Begin
	@Interface: Write_Trylock
	@Commit_point_set:
		Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
	@Condition:
		!writer_lock_acquired && reader_lock_cnt == 0
	@HB_condition:
		HB_Write_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__COND_SAT__)
			writer_lock_acquired = true;
	@Post_check:
		__COND_SAT__ ? __RET__ == 1 : __RET__ == 0
	@End
*/
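/*
 * write_trylock: subtract the full bias once; if the lock was free, the
 * caller now holds the write lock and 1 is returned. Otherwise restore the
 * bias and return 0.
 */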
static inline int write_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Write_Trylock_Point
		@End
	*/
	if (priorvalue == RW_LOCK_BIAS) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Write_Trylock_Point
			@Label: Write_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Write_Trylock_Point
		@Label: Write_Trylock_Fail_Point
		@End
	*/
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}

/**
	@Begin
	@Interface: Read_Unlock
	@Commit_point_set: Read_Unlock_Point
	@Check:
		reader_lock_cnt > 0 && !writer_lock_acquired
	@Action:
		reader_lock_cnt--;
	@End
*/
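/*
 * read_unlock: give back one reader's share of the counter, releasing the
 * read-side critical section.
 */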
static inline void read_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Unlock_Point
		@End
	*/
}

/**
	@Begin
	@Interface: Write_Unlock
	@Commit_point_set: Write_Unlock_Point
	@Check:
		reader_lock_cnt == 0 && writer_lock_acquired
	@Action:
		writer_lock_acquired = false;
	@End
*/
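/*
 * write_unlock: add the full bias back, releasing the write-side critical
 * section and making the lock available again.
 */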
static inline void write_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Unlock_Point
		@End
	*/
}

rwlock_t mylock;
int shareddata;

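/* Test driver: each thread alternates between a read-locked load and a
 * write-locked store of the shared data. */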
static void a(void *obj)
{
	int i;
	for (i = 0; i < 2; i++) {
		if ((i % 2) == 0) {
			read_lock(&mylock);
			load_32(&shareddata);
			read_unlock(&mylock);
		} else {
			write_lock(&mylock);
			store_32(&shareddata, (unsigned int)i);
			write_unlock(&mylock);
		}
	}
}

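/* Entry point for the model checker: initialize the lock to its unlocked
 * state (RW_LOCK_BIAS) and run the driver in two concurrent threads. */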
int user_main(int argc, char **argv)
{
	/**
		@Begin
		@Entry_point
		@End
	*/
	thrd_t t1, t2;
	atomic_init(&mylock.lock, RW_LOCK_BIAS);

	thrd_create(&t1, (thrd_start_t)&a, NULL);
	thrd_create(&t2, (thrd_start_t)&a, NULL);

	thrd_join(t1);
	thrd_join(t2);

	return 0;
}