[cdsspec-compiler.git] / benchmark / linuxrwlocks / linuxrwlocks.c
#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
        atomic_int lock;
} rwlock_t;
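
/*
 * Counter encoding used throughout this file: lock == RW_LOCK_BIAS means the
 * lock is free, each reader that holds the lock has subtracted 1, and a
 * writer that holds the lock has subtracted the full RW_LOCK_BIAS. Hence at
 * most RW_LOCK_BIAS readers, or one writer, can hold the lock at a time.
 */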
/** Example implementation of the Linux rw lock, along with a 2-thread test
 *  driver. */
/**
        Properties to check:
        1. At most 1 thread can acquire the write lock, and while it holds it, no
                other thread can acquire any lock (read or write).
        2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
        3. A read_unlock releases 1 read lock, and a write_unlock releases the
        write lock; neither can release a lock that it did not acquire.
        ###
        4. The read lock and the write lock cannot be held at the same time.
        5. Happens-before relationships should be checked and guaranteed, and
        should be as follows:
                a. read_unlock hb-> write_lock
                b. write_unlock hb-> write_lock
                c. write_unlock hb-> read_lock
*/

/**
        Interesting points for locks:
        a. If a user calls unlock() before any lock(), the model checker will fail.
        In that case we cannot say that the data structure is buggy; how do we
        distinguish such misuse from a real data structure bug?
        b. We should specify that, for a specific thread, successful locks and
        unlocks always come in pairs. We could make this an auxiliary check, since
        it is an extra rule for how the interfaces should be called (see the sketch
        of such a check after write_unlock() below).
*/

/**
        @Begin
        @Global_define:
                @DeclareVar:
                        bool writer_lock_acquired;
                        int reader_lock_cnt;
                @InitVar:
                        writer_lock_acquired = false;
                        reader_lock_cnt = 0;
        @Happens_before:
                # Since commit_point_set has no ID attached, A -> B means that for any B,
                # the previous A happens before B.
                Write_Unlock -> Write_Lock
                Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

                Write_Unlock -> Read_Lock
                Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
        @End
*/
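
/*
 * Note: the Write_Unlock -> {Write_Lock, Read_Lock, *_Trylock} edges above
 * mirror the release/acquire pairing in the code below: the unlock functions
 * update the lock counter with memory_order_release, and the lock/trylock
 * functions read-modify-write it with memory_order_acquire.
 */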

static inline int read_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}
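
/*
 * Both predicates above are only advisory: they load the lock counter with
 * memory_order_relaxed and take nothing, so the answer may already be stale
 * by the time the caller attempts an actual lock or trylock.
 */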

/**
        @Begin
        @Interface: Read_Lock
        @Commit_point_set:
                Read_Lock_Success_1 | Read_Lock_Success_2
        @Check:
                !writer_lock_acquired
        @Action:
                reader_lock_cnt++;
        @End
*/
static inline void read_lock(rwlock_t *rw)
{
        /* Optimistically take a reader slot. */
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue > 0
                @Label:Read_Lock_Success_1
                @End
        */
        while (priorvalue <= 0) {
                /* A writer holds the lock: undo the decrement, spin until the
                   counter looks positive again, then retry. */
                atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue > 0
                        @Label:Read_Lock_Success_2
                        @End
                */
        }
}


/**
        @Begin
        @Interface: Write_Lock
        @Commit_point_set:
                Write_Lock_Success_1 | Write_Lock_Success_2
        @Check:
                !writer_lock_acquired && reader_lock_cnt == 0
        @Action:
                writer_lock_acquired = true;
        @End
*/
static inline void write_lock(rwlock_t *rw)
{
        /* Try to claim all RW_LOCK_BIAS units at once. */
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                @Label: Write_Lock_Success_1
                @End
        */
        while (priorvalue != RW_LOCK_BIAS) {
                /* Readers or another writer are present: undo the subtraction,
                   spin until the lock looks free, then retry. */
                atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                        @Label: Write_Lock_Success_2
                        @End
                */
        }
}

/**
        @Begin
        @Interface: Read_Trylock
        @Commit_point_set:
                Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
        @Condition:
                writer_lock_acquired == false
        @HB_condition:
                HB_Read_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        reader_lock_cnt++;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
static inline int read_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Read_Trylock_Point
                @End
        */
        if (priorvalue > 0) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Read_Trylock_Point
                        @Label:  Read_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Read_Trylock_Point
                @Label:  Read_Trylock_Fail_Point
                @End
        */
        /* The lock was not available: undo the speculative decrement. */
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Write_Trylock
        @Commit_point_set:
                Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
        @Condition:
                !writer_lock_acquired && reader_lock_cnt == 0
        @HB_condition:
                HB_Write_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        writer_lock_acquired = true;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
static inline int write_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Write_Trylock_Point
                @End
        */
        if (priorvalue == RW_LOCK_BIAS) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Write_Trylock_Point
                        @Label: Write_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Write_Trylock_Point
                @Label: Write_Trylock_Fail_Point
                @End
        */
        /* The lock was not free: undo the speculative subtraction. */
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Read_Unlock
        @Commit_point_set: Read_Unlock_Point
        @Check:
                reader_lock_cnt > 0 && !writer_lock_acquired
        @Action:
                reader_lock_cnt--;
        @End
*/
static inline void read_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Read_Unlock_Point
                @End
        */
}

/**
        @Begin
        @Interface: Write_Unlock
        @Commit_point_set: Write_Unlock_Point
        @Check:
                reader_lock_cnt == 0 && writer_lock_acquired
        @Action:
                writer_lock_acquired = false;
        @End
*/
static inline void write_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Write_Unlock_Point
                @End
        */
}
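
/*
 * A minimal sketch of the auxiliary pairing check mentioned near the top of
 * this file: wrappers that track the per-thread read-lock depth so a checker
 * could flag an unlock that has no matching successful lock. This is an
 * illustration only; the wrapper names and the caller-supplied depth counter
 * are hypothetical, and the test driver below does not use them.
 */
static inline void read_lock_checked(rwlock_t *rw, int *depth)
{
        read_lock(rw);
        (*depth)++;
}

static inline void read_unlock_checked(rwlock_t *rw, int *depth)
{
        /* A depth of 0 here would mean an unlock without a matching lock. */
        (*depth)--;
        read_unlock(rw);
}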

rwlock_t mylock;
int shareddata;

/* Thread body: alternates a read-side and a write-side critical section. */
static void a(void *obj)
{
        int i;
        for(i = 0; i < 2; i++) {
                if ((i % 2) == 0) {
                        read_lock(&mylock);
                        load_32(&shareddata);
                        read_unlock(&mylock);
                } else {
                        write_lock(&mylock);
                        store_32(&shareddata, (unsigned int)i);
                        write_unlock(&mylock);
                }
        }
}

int user_main(int argc, char **argv)
{
        /**
                @Begin
                @Entry_point
                @End
        */
        thrd_t t1, t2;
        /* RW_LOCK_BIAS is the unlocked state of the counter. */
        atomic_init(&mylock.lock, RW_LOCK_BIAS);

        thrd_create(&t1, (thrd_start_t)&a, NULL);
        thrd_create(&t2, (thrd_start_t)&a, NULL);

        thrd_join(t1);
        thrd_join(t2);

        return 0;
}