/* cdsspec-compiler.git: benchmark/linuxrwlocks/linuxrwlocks.c */
#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>
#include "common.h"

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
	atomic_int lock;
} rwlock_t;
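
/*
 * Lock-word encoding used by the functions below: the counter is initialized
 * to RW_LOCK_BIAS.  Each reader subtracts 1 while it holds the lock; a writer
 * subtracts the whole RW_LOCK_BIAS.  Ignoring attempts that are still in
 * flight (and later undone):
 *   value == RW_LOCK_BIAS      -> lock is free
 *   0 < value < RW_LOCK_BIAS   -> held by (RW_LOCK_BIAS - value) readers
 *   value == 0                 -> held by the writer
 */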

/** Example implementation of the Linux rw lock along with a two-thread test
 *  driver. */

/**
	Properties to check:
	1. At most one thread can acquire the write lock, and while it holds the
		write lock, no other thread can acquire any lock (read or write).
	2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
	3. A read_unlock releases one read lock, and a write_unlock releases the
	write lock. Neither can release a lock that was not acquired.
	###
	4. The read lock and the write lock cannot be held at the same time.
	5. Happens-before relationships should be checked and guaranteed, as
	follows:
		a. read_unlock hb-> write_lock
		b. write_unlock hb-> write_lock
		c. write_unlock hb-> read_lock
*/

/**
	Interesting points for locks:
	a. If a user calls unlock() before any lock(), the model checker will fail.
	In that case we cannot say that the data structure itself is buggy; how do
	we tell such misuse apart from a real data structure bug?
	b. We should specify that, for a given thread, successful locks and unlocks
	always come in pairs. We could make this an auxiliary check, since it is an
	extra rule for how the interfaces should be called (see the
	paired_usage_example() sketch just before the test driver below).
*/

/**
	@Begin
	@Options:
		LANG = C;
	@Global_define:
		@DeclareVar:
			bool writer_lock_acquired;
			int reader_lock_cnt;
		@InitVar:
			writer_lock_acquired = false;
			reader_lock_cnt = 0;
	@Happens_before:
		# Since commit_point_set has no ID attached, A -> B means that for any B,
		# the previous A happens before B.
		Read_Unlock -> Read_Lock
		Read_Unlock -> Write_Lock
		Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
		Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)

		Write_Unlock -> Write_Lock
		Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

		Write_Unlock -> Read_Lock
		Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
	@End
*/

static inline int read_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}


/**
	@Begin
	@Interface: Read_Lock
	@Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
	@Check: !writer_lock_acquired
	@Action: reader_lock_cnt++;
	@End
*/
static inline void read_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue > 0
		@Label:Read_Lock_Success_1
		@End
	*/
	while (priorvalue <= 0) {
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue > 0
			@Label:Read_Lock_Success_2
			@End
		*/
	}
}
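
/*
 * Both read_lock above and write_lock below acquire speculatively: they
 * subtract first and only then inspect the prior value.  On failure the
 * subtraction is rolled back with a relaxed add, the thread spins (yielding)
 * until the lock looks available again, and the whole attempt is retried.
 */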


/**
	@Begin
	@Interface: Write_Lock
	@Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
	@Check: !writer_lock_acquired && reader_lock_cnt == 0
	@Action: writer_lock_acquired = true;
	@End
*/
static inline void write_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
		@Label: Write_Lock_Success_1
		@End
	*/
	while (priorvalue != RW_LOCK_BIAS) {
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
			@Label: Write_Lock_Success_2
			@End
		*/
	}
}

/**
	@Begin
	@Interface: Read_Trylock
	@Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
	//@Condition: writer_lock_acquired == false
	@HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__COND_SAT__)
			reader_lock_cnt++;
	@Post_check: __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
	@End
*/
static inline int read_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Read_Trylock_Point
		@End
	*/
	if (priorvalue > 0) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Read_Trylock_Point
			@Label: Read_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Read_Trylock_Point
		@Label: Read_Trylock_Fail_Point
		@End
	*/
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}

/**
	@Begin
	@Interface: Write_Trylock
	@Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
	@HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__RET__ == 1)
			writer_lock_acquired = true;
	@End
*/
static inline int write_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	//model_print("write_trylock: priorvalue: %d\n", priorvalue);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Write_Trylock_Point
		@End
	*/
	if (priorvalue == RW_LOCK_BIAS) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Write_Trylock_Point
			@Label: Write_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Write_Trylock_Point
		@Label: Write_Trylock_Fail_Point
		@End
	*/
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}
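
/*
 * Note that read_trylock and write_trylock above also subtract speculatively;
 * on failure they immediately add the amount back (with relaxed ordering)
 * instead of spinning, and report the failure by returning 0.
 */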

/**
	@Begin
	@Interface: Read_Unlock
	@Commit_point_set: Read_Unlock_Point
	@Check: reader_lock_cnt > 0 && !writer_lock_acquired
	@Action: reader_lock_cnt--;
	@End
*/
static inline void read_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Unlock_Point
		@End
	*/
}

/**
	@Begin
	@Interface: Write_Unlock
	@Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
	@Check: reader_lock_cnt == 0 && writer_lock_acquired
	@Action: writer_lock_acquired = false;
	@End
*/
static inline void write_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Unlock_Point
		@End
	*/

	/**
		//@Begin
		@Commit_point_clear: true
		@Label: Write_Unlock_Clear
		@End
	*/
}

rwlock_t mylock;
int shareddata;

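/*
 * Illustrative sketch only (not part of the original benchmark): the paired
 * lock/unlock rule from the "Interesting points" comment above, i.e. within a
 * single thread every successful lock is matched by exactly one unlock of the
 * same kind.  The function name is hypothetical and the function is never
 * called by the test driver.
 */
static inline void paired_usage_example(void)
{
	read_lock(&mylock);
	int observed = shareddata;   /* read protected by the read lock      */
	read_unlock(&mylock);        /* released by the thread that took it  */

	write_lock(&mylock);
	shareddata = observed + 1;   /* write protected by the write lock    */
	write_unlock(&mylock);       /* matching unlock for the write_lock   */
}
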
static void a(void *obj)
{
	int i;
	for(i = 0; i < 2; i++) {
		if ((i % 2) == 0) {
			read_lock(&mylock);
			//load_32(&shareddata);
			//printf("%d\n", shareddata);
			read_unlock(&mylock);
		} else {
			write_lock(&mylock);
			//store_32(&shareddata,(unsigned int)i);
			shareddata = 47;
			write_unlock(&mylock);
		}
	}
}

static void b(void *obj)
{
	int i;
	for(i = 0; i < 2; i++) {
		if ((i % 2) == 0) {
			if (read_trylock(&mylock) == 1) {
				//printf("%d\n", shareddata);
				read_unlock(&mylock);
			}
		} else {
			if (write_trylock(&mylock) == 1) {
				//shareddata = 47;
				write_unlock(&mylock);
			}
		}
	}
}

int user_main(int argc, char **argv)
{
	/**
		@Begin
		@Entry_point
		@End
	*/
	thrd_t t1, t2;
	atomic_init(&mylock.lock, RW_LOCK_BIAS);

	thrd_create(&t1, (thrd_start_t)&a, NULL);
	thrd_create(&t2, (thrd_start_t)&b, NULL);

	thrd_join(t1);
	thrd_join(t2);

	return 0;
}