#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

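/*
 * Lock-word encoding: the lock holds RW_LOCK_BIAS when free. Each reader
 * that holds the lock has subtracted 1, and a writer subtracts the full
 * RW_LOCK_BIAS, so the value is positive while only readers hold the lock
 * and equals RW_LOCK_BIAS only when the lock is completely free.
 */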
typedef union {
        atomic_int lock;
} rwlock_t;

/** Example implementation of the Linux rwlock, along with a two-thread test
 *  driver. */

/**
        Properties to check:
        1. At most one thread can acquire the write lock, and while it is held, no
                other thread can acquire any lock (read or write).
        2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
        3. A read_unlock releases one read lock, and a write_unlock releases the
        write lock. Neither can release a lock it did not acquire.
        ###
        4. The read lock and the write lock cannot be held at the same time.
        5. The following happens-before relationships should be checked and
        guaranteed:
                a. read_unlock hb-> write_lock
                b. write_unlock hb-> write_lock
                c. write_unlock hb-> read_lock
*/

/**
        Interesting points for locks:
        a. If a user calls unlock() before any lock(), the model checker will fail.
        In that case we cannot say that the data structure itself is buggy; how do
        we distinguish such misuse from a real data structure bug?
        b. We should specify that, for a given thread, successful locks and
        unlocks always come in pairs. We could make this an auxiliary check,
        since it is an extra rule for how the interfaces should be called (see the
        usage sketch below).
*/

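/*
 * Well-formed usage pairs each successful lock with exactly one matching
 * unlock on the same thread, as the test driver at the bottom of this file
 * does:
 *
 *     read_lock(&mylock);   ... read shared data ...   read_unlock(&mylock);
 *     write_lock(&mylock);  ... write shared data ...  write_unlock(&mylock);
 */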
/**
        @Begin
        @Options:
                LANG = C;
        @Global_define:
                @DeclareVar:
                        bool writer_lock_acquired;
                        int reader_lock_cnt;
                @InitVar:
                        writer_lock_acquired = false;
                        reader_lock_cnt = 0;
        @Happens_before:
                # Since commit_point_set has no ID attached, A -> B means that for any B,
                # the previous A happens before B.
                Write_Unlock -> Write_Lock
                Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

                Write_Unlock -> Read_Lock
                Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
        @End
*/

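/*
 * read_can_lock()/write_can_lock() only peek at the lock word with relaxed
 * loads; they are hints and do not guarantee that a subsequent lock or
 * trylock attempt will succeed.
 */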
static inline int read_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}


/**
        @Begin
        @Interface: Read_Lock
        @Commit_point_set:
                Read_Lock_Success_1 | Read_Lock_Success_2
        @Check:
                !writer_lock_acquired
        @Action:
                reader_lock_cnt++;
        @End
*/
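/*
 * read_lock(): optimistically decrement the lock word; if the prior value was
 * not positive a writer holds (or is acquiring) the lock, so undo the
 * decrement, spin until the word becomes positive again, and retry.
 */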
static inline void read_lock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue > 0
                @Label: Read_Lock_Success_1
                @End
        */
        while (priorvalue <= 0) {
                atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue > 0
                        @Label: Read_Lock_Success_2
                        @End
                */
        }
}


/**
        @Begin
        @Interface: Write_Lock
        @Commit_point_set:
                Write_Lock_Success_1 | Write_Lock_Success_2
        @Check:
                !writer_lock_acquired && reader_lock_cnt == 0
        @Action:
                writer_lock_acquired = true;
        @End
*/
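/*
 * write_lock(): subtract the full RW_LOCK_BIAS; if the prior value was exactly
 * RW_LOCK_BIAS the lock was free and is now write-held. Otherwise undo the
 * subtraction, spin until the word returns to RW_LOCK_BIAS, and retry.
 */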
static inline void write_lock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                @Label: Write_Lock_Success_1
                @End
        */
        while (priorvalue != RW_LOCK_BIAS) {
                atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                        @Label: Write_Lock_Success_2
                        @End
                */
        }
}

/**
        @Begin
        @Interface: Read_Trylock
        @Commit_point_set:
                Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
        @Condition:
                writer_lock_acquired == false
        @HB_condition:
                HB_Read_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        reader_lock_cnt++;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
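/*
 * read_trylock(): a single decrement attempt. If the prior value was positive
 * the read lock is held and 1 is returned; otherwise the decrement is undone
 * and 0 is returned without blocking.
 */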
static inline int read_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Read_Trylock_Point
                @End
        */
        if (priorvalue > 0) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Read_Trylock_Point
                        @Label: Read_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Read_Trylock_Point
                @Label: Read_Trylock_Fail_Point
                @End
        */
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Write_Trylock
        @Commit_point_set:
                Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
        @Condition:
                !writer_lock_acquired && reader_lock_cnt == 0
        @HB_condition:
                HB_Write_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        writer_lock_acquired = true;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
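/*
 * write_trylock(): a single attempt to subtract RW_LOCK_BIAS. If the prior
 * value was exactly RW_LOCK_BIAS the write lock is held and 1 is returned;
 * otherwise the subtraction is undone and 0 is returned without blocking.
 */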
static inline int write_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Potential_commit_point_define: true
                @Label: Potential_Write_Trylock_Point
                @End
        */
        if (priorvalue == RW_LOCK_BIAS) {
                /**
                        @Begin
                        @Commit_point_define: true
                        @Potential_commit_point_label: Potential_Write_Trylock_Point
                        @Label: Write_Trylock_Succ_Point
                        @End
                */
                return 1;
        }
        /**
                @Begin
                @Commit_point_define: true
                @Potential_commit_point_label: Potential_Write_Trylock_Point
                @Label: Write_Trylock_Fail_Point
                @End
        */
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Read_Unlock
        @Commit_point_set: Read_Unlock_Point
        @Check:
                reader_lock_cnt > 0 && !writer_lock_acquired
        @Action:
                reader_lock_cnt--;
        @End
*/
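/*
 * read_unlock(): give the reader's 1 back to the lock word; the release
 * ordering pairs with the acquire in a later successful lock acquisition.
 */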
static inline void read_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Read_Unlock_Point
                @End
        */
}

/**
        @Begin
        @Interface: Write_Unlock
        @Commit_point_set: Write_Unlock_Point
        @Check:
                reader_lock_cnt == 0 && writer_lock_acquired
        @Action:
                writer_lock_acquired = false;
        @End
*/
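/*
 * write_unlock(): add RW_LOCK_BIAS back to the lock word; the release
 * ordering pairs with the acquire in a later successful read or write
 * acquisition.
 */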
static inline void write_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Write_Unlock_Point
                @End
        */
}

rwlock_t mylock;
int shareddata;

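/*
 * Test driver: each of two threads alternates a read-locked load of
 * shareddata with a write-locked store to it.
 */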
static void a(void *obj)
{
        int i;
        for (i = 0; i < 2; i++) {
                if ((i % 2) == 0) {
                        read_lock(&mylock);
                        load_32(&shareddata);
                        read_unlock(&mylock);
                } else {
                        write_lock(&mylock);
                        store_32(&shareddata, (unsigned int)i);
                        write_unlock(&mylock);
                }
        }
}

int user_main(int argc, char **argv)
{
        /**
                @Begin
                @Entry_point
                @End
        */
        thrd_t t1, t2;
        atomic_init(&mylock.lock, RW_LOCK_BIAS);

        thrd_create(&t1, (thrd_start_t)&a, NULL);
        thrd_create(&t2, (thrd_start_t)&a, NULL);

        thrd_join(t1);
        thrd_join(t2);

        return 0;
}