/* cdsspec-compiler benchmark: linuxrwlocks.c */
1 #include <stdio.h>
2 #include <threads.h>
3 #include <stdatomic.h>
4
5 #include "librace.h"
6
7 #define RW_LOCK_BIAS            0x00100000
8 #define WRITE_LOCK_CMP          RW_LOCK_BIAS
9
10 /** Example implementation of linux rw lock along with 2 thread test
11  *  driver... */
12
13 /**
14         Properties to check:
15         1. At most 1 thread can acquire the write lock, and at the same time, no
16                 other threads can acquire any lock (including read/write lock).
17         2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
18         3. A read_unlock release 1 read lock, and a write_unlock release the write
19         lock. They can not release a lock that they don't acquire.
20         ###
21         4. Read_lock and write_lock can not be grabbed at the same time.
	5. Happens-before relationship should be checked and guaranteed, which
23         should be as the following:
24                 a. read_unlock hb-> write_lock
25                 b. write_unlock hb-> write_lock
26                 c. write_unlock hb-> read_lock
27 */
28
29 /**
30         Interesting point for locks:
31         a. If the users unlock() before any lock(), then the model checker will fail.
32         For this case, we can not say that the data structure is buggy, how can we
33         tell them from a real data structure bug???
34         b. We should specify that for a specific thread, successful locks and
35         unlocks should always come in pairs. We could make this check as an
	auxiliary check since it is an extra rule for how the interfaces should be called.
37 */
38
39 /**
40         @Begin
41         @Global_define:
42                 @DeclareVar:
43                         bool writer_lock_acquired;
44                         int reader_lock_cnt;
45                 @InitVar:
46                         writer_lock_acquired = false;
47                         reader_lock_cnt = 0;
48         @Happens_before:
49                 # Since commit_point_set has no ID attached, A -> B means that for any B,
50                 # the previous A happens before B.
51                 Read_Unlock -> Write_Lock
52                 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
53                 
54                 Write_Unlock -> Write_Lock
55                 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
56
57                 Write_Unlock -> Read_Lock
58                 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
59         @End
60 */
61
62 /**
63         */
64
65 typedef union {
66         atomic_int lock;
67 } rwlock_t;
68
69 static inline int read_can_lock(rwlock_t *lock)
70 {
71         return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
72 }
73
74 static inline int write_can_lock(rwlock_t *lock)
75 {
76         return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
77 }
78
79
80 /**
81         @Begin
82         @Interface: Read_Lock
83         @Commit_point_set:
84                 Read_Lock_Success_1 | Read_Lock_Success_2
85         @Check:
86                 !writer_lock_acquired
87         @Action:
88                 reader_lock_cnt++;
89         @End
90 */
91 static inline void read_lock(rwlock_t *rw)
92 {
93         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
94         /**
95                 @Begin
96                 @Commit_point_define_check: __ATOMIC_RET__ > 0
97                 @Label:Read_Lock_Success_1
98                 @End
99         */
100         while (priorvalue <= 0) {
101                 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
102                 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
103                         thrd_yield();
104                 }
105                 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
106                 /**
107                         @Begin
108                         @Commit_point_define_check: __ATOMIC_RET__ > 0
109                         @Label:Read_Lock_Success_2
110                         @End
111                 */
112         }
113 }
114
115
116 /**
117         @Begin
118         @Interface: Write_Lock
119         @Commit_point_set:
120                 Write_Lock_Success_1 | Write_Lock_Success_2
121         @Check:
122                 !writer_lock_acquired && reader_lock_cnt == 0
123         @Action:
124                 writer_lock_acquired = true;
125         @End
126 */
127 static inline void write_lock(rwlock_t *rw)
128 {
129         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
130         /**
131                 @Begin
132                 @Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
133                 @Label: Write_Lock_Success_1
134                 @End
135         */
136         while (priorvalue != RW_LOCK_BIAS) {
137                 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
138                 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
139                         thrd_yield();
140                 }
141                 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
142                 /**
143                         @Begin
144                         @Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
145                         @Label: Write_Lock_Success_2
146                         @End
147                 */
148         }
149 }
150
151 /**
152         @Begin
153         @Interface: Read_Trylock
154         @Commit_point_set:
155                 Read_Trylock_Point
156         @Condition:
157                 writer_lock_acquired == false
158         @HB_condition:
159                 HB_Read_Trylock_Succ :: __RET__ == 1
160         @Action:
161                 if (__COND_SAT__)
162                         reader_lock_cnt++;
163         @Post_check:
164                 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
165         @End
166 */
167 static inline int read_trylock(rwlock_t *rw)
168 {
169         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
170         /**
171                 @Begin
172                 @Commit_point_define_check: true
173                 @Label:Read_Trylock_Point
174                 @End
175         */
176         if (priorvalue > 0)
177                 return 1;
178
179         atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
180         return 0;
181 }
182
183 /**
184         @Begin
185         @Interface: Write_Trylock
186         @Commit_point_set:
187                 Write_Trylock_Point
188         @Condition:
189                 !writer_lock_acquired && reader_lock_cnt == 0
190         @HB_condition:
191                 HB_Write_Trylock_Succ :: __RET__ == 1
192         @Action:
193                 if (__COND_SAT__)
194                         writer_lock_acquired = true;
195         @Post_check:
196                 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
197         @End
198 */
199 static inline int write_trylock(rwlock_t *rw)
200 {
201         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
202         /**
203                 @Begin
204                 @Commit_point_define_check: true
205                 @Label: Write_Trylock_Point
206                 @End
207         */
208         if (priorvalue == RW_LOCK_BIAS)
209                 return 1;
210
211         atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
212         return 0;
213 }
214
215 /**
216         @Begin
217         @Interface: Read_Unlock
218         @Commit_point_set: Read_Unlock_Point
219         @Check:
220                 reader_lock_cnt > 0 && !writer_lock_acquired
221         @Action: 
222                 reader_lock_cnt--;
223         @End
224 */
225 static inline void read_unlock(rwlock_t *rw)
226 {
227         atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
228         /**
229                 @Begin
230                 @Commit_point_define_check: true
231                 @Label: Read_Unlock_Point
232                 @End
233         */
234 }
235
236 /**
237         @Begin
238         @Interface: Write_Unlock
239         @Commit_point_set: Write_Unlock_Point
240         @Check:
241                 reader_lock_cnt == 0 && writer_lock_acquired
242         @Action: 
243                 writer_lock_acquired = false;
244         @End
245 */
246
247 static inline void write_unlock(rwlock_t *rw)
248 {
249         atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
250         /**
251                 @Begin
252                 @Commit_point_define_check: true
253                 @Label: Write_Unlock_Point
254                 @End
255         */
256 }
257
258 rwlock_t mylock;
259 int shareddata;
260
261 static void a(void *obj)
262 {
263         int i;
264         for(i = 0; i < 2; i++) {
265                 if ((i % 2) == 0) {
266                         read_lock(&mylock);
267                         load_32(&shareddata);
268                         read_unlock(&mylock);
269                 } else {
270                         write_lock(&mylock);
271                         store_32(&shareddata,(unsigned int)i);
272                         write_unlock(&mylock);
273                 }
274         }
275 }
276
277 int user_main(int argc, char **argv)
278 {
279         thrd_t t1, t2;
280         atomic_init(&mylock.lock, RW_LOCK_BIAS);
281
282         thrd_create(&t1, (thrd_start_t)&a, NULL);
283         thrd_create(&t2, (thrd_start_t)&a, NULL);
284
285         thrd_join(t1);
286         thrd_join(t2);
287
288         return 0;
289 }