/* cdsspec-compiler.git / benchmark / linuxrwlocks / linuxrwlocks.c */
#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include <spec_lib.h>
#include <stdlib.h>
#include <cdsannotate.h>
#include <specannotation.h>
#include <model_memory.h>
#include "common.h"

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
	atomic_int lock;
} rwlock_t;

/** Example implementation of the Linux rwlock along with a 2-thread test
 *  driver... */

/**
	Properties to check:
	1. At most 1 thread can hold the write lock, and while it does, no
		other thread can acquire any lock (read or write).
	2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
	3. A read_unlock releases 1 read lock, and a write_unlock releases the
	write lock. Neither can release a lock that it has not acquired.
	###
	4. The read lock and the write lock cannot be held at the same time.
	5. The following happens-before relationships should be checked and
	guaranteed:
		a. read_unlock hb-> write_lock
		b. write_unlock hb-> write_lock
		c. write_unlock hb-> read_lock
*/
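
/*
	A minimal sketch (not part of the original benchmark) of the counter
	encoding that the properties above rely on: the lock word starts at
	RW_LOCK_BIAS, each reader subtracts 1 and a writer subtracts
	RW_LOCK_BIAS, so the current value alone identifies the lock state.
	The enum and the helper lock_state_of() are hypothetical and exist only
	for illustration.
*/
enum lock_state { UNLOCKED, READ_LOCKED, WRITE_LOCKED, CONTENDED };

static inline enum lock_state lock_state_of(int lockword)
{
	if (lockword == RW_LOCK_BIAS)
		return UNLOCKED;      /* no readers, no writer */
	if (lockword > 0)
		return READ_LOCKED;   /* RW_LOCK_BIAS - lockword readers hold it */
	if (lockword == 0)
		return WRITE_LOCKED;  /* exactly one writer holds it */
	return CONTENDED;             /* a failed acquisition is transiently below 0 */
}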

/**
	Interesting points for locks:
	a. If a user calls unlock() before any lock(), the model checker will fail.
	In that case we cannot say the data structure itself is buggy, so how do we
	tell such misuse apart from a real data structure bug?
	b. We should specify that, for a given thread, successful locks and unlocks
	always come in pairs. We could make this an auxiliary check, since it is an
	extra rule for how the interfaces should be called; a sketch of such a
	check follows.
*/
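
/*
	A minimal sketch of the auxiliary pairing check suggested in (b) above,
	assuming C11 thread-local storage; the counters and the note_*() helpers
	are hypothetical and are not part of the cdsspec annotations below.
*/
#include <assert.h>

static _Thread_local int tl_read_held;    /* read locks held by this thread */
static _Thread_local int tl_write_held;   /* write locks held by this thread */

static inline void note_read_lock(void)    { tl_read_held++; }
static inline void note_read_unlock(void)  { assert(tl_read_held > 0);  tl_read_held--; }
static inline void note_write_lock(void)   { tl_write_held++; }
static inline void note_write_unlock(void) { assert(tl_write_held > 0); tl_write_held--; }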

/** @DeclareState:
		bool writer_lock_acquired;
		int reader_lock_cnt;
	@Initial: writer_lock_acquired = false;
		reader_lock_cnt = 0;
	@Commutativity: Read_Lock <-> Read_Lock: true
	@Commutativity: Read_Lock <-> Read_Unlock: true
	@Commutativity: Read_Lock <-> Read_Trylock: true
	@Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__

	@Commutativity: Read_Unlock <-> Read_Unlock: true
	@Commutativity: Read_Unlock <-> Read_Trylock: true
	@Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__

	@Commutativity: Read_Trylock <-> Read_Trylock: true
	@Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
	@Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
	@Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__

	@Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
	@Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
	@Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
*/
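
/*
	A minimal sketch, in plain C, of the abstract state that @DeclareState,
	@Initial and the @Commutativity rules above describe; the struct and the
	function names are hypothetical and exist only to make the specification
	easier to read, the checker itself consumes the annotations, not this code.
*/
#include <stdbool.h>

struct rwlock_spec_state {
	bool writer_lock_acquired;   /* true while a writer holds the lock */
	int reader_lock_cnt;         /* number of readers currently holding it */
};

/* @Initial: no writer, no readers. */
static inline void rwlock_spec_init(struct rwlock_spec_state *s)
{
	s->writer_lock_acquired = false;
	s->reader_lock_cnt = 0;
}

/* Example commutativity rule: two Write_Trylock calls commute
 * unless both of them succeed. */
static inline bool write_trylock_commutes(int ret1, int ret2)
{
	return !ret1 || !ret2;
}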

static inline int read_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}


/**
	@Begin
	@Interface: Read_Lock
	@Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
	@Check: !writer_lock_acquired
	@Action: reader_lock_cnt++;
	@End
*/
static inline void read_lock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue > 0
		@Label: Read_Lock_Success_1
		@End
	*/
	while (priorvalue <= 0) {
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
			thrd_yield();
		}
		/**********  Admissibility  **********/
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue > 0
			@Label: Read_Lock_Success_2
			@End
		*/
	}
}

/**
	@Begin
	@Interface: Write_Lock
	@Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
	@Check: !writer_lock_acquired && reader_lock_cnt == 0
	@Action: writer_lock_acquired = true;
	@End
*/
static inline void write_lock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
		@Label: Write_Lock_Success_1
		@End
	*/
	while (priorvalue != RW_LOCK_BIAS) {
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
			thrd_yield();
		}
		/**********  Admissibility  **********/
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
			@Label: Write_Lock_Success_2
			@End
		*/
	}
}

/**
	@Begin
	@Interface: Read_Trylock
	@Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
	//@Condition: writer_lock_acquired == false
	@HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__RET__)
			reader_lock_cnt++;
	@Post_check: __RET__ == !writer_lock_acquired || !__RET__
	@End
*/
static inline int read_trylock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Read_Trylock_Point
		@End
	*/
	if (priorvalue > 0) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Read_Trylock_Point
			@Label: Read_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Read_Trylock_Point
		@Label: Read_Trylock_Fail_Point
		@End
	*/
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}

/**
	@Begin
	@Interface: Write_Trylock
	@Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
	@HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
	@Action:
		if (__RET__ == 1)
			writer_lock_acquired = true;
	@End
*/
static inline int write_trylock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	//model_print("write_trylock: priorvalue: %d\n", priorvalue);
	/**
		@Begin
		@Potential_commit_point_define: true
		@Label: Potential_Write_Trylock_Point
		@End
	*/
	if (priorvalue == RW_LOCK_BIAS) {
		/**
			@Begin
			@Commit_point_define: true
			@Potential_commit_point_label: Potential_Write_Trylock_Point
			@Label: Write_Trylock_Succ_Point
			@End
		*/
		return 1;
	}
	/**
		@Begin
		@Commit_point_define: true
		@Potential_commit_point_label: Potential_Write_Trylock_Point
		@Label: Write_Trylock_Fail_Point
		@End
	*/
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}

/**
	@Begin
	@Interface: Read_Unlock
	@Commit_point_set: Read_Unlock_Point
	@Check: reader_lock_cnt > 0 && !writer_lock_acquired
	@Action: reader_lock_cnt--;
	@End
*/
static inline void read_unlock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Unlock_Point
		@End
	*/
}

/**
	@Begin
	@Interface: Write_Unlock
	@Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
	@Check: reader_lock_cnt == 0 && writer_lock_acquired
	@Action: writer_lock_acquired = false;
	@End
*/
static inline void write_unlock(rwlock_t *rw)
{
	/**********  Admissibility  **********/
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Unlock_Point
		@End
	*/

	/**
		//@Begin
		@Commit_point_clear: true
		@Label: Write_Unlock_Clear
		@End
	*/
}

rwlock_t mylock;
int shareddata;

/* Thread 1: exercises the blocking read and write lock/unlock paths. */
static void a(void *obj)
{
	read_lock(&mylock);
	//load_32(&shareddata);
	//printf("%d\n", shareddata);
	read_unlock(&mylock);

	write_lock(&mylock);
	//store_32(&shareddata,(unsigned int)i);
	shareddata = 47;
	write_unlock(&mylock);
}

/* Thread 2: exercises the trylock paths, unlocking only on success. */
static void b(void *obj)
{
	if (read_trylock(&mylock) == 1) {
		//printf("%d\n", shareddata);
		read_unlock(&mylock);
	}

	if (write_trylock(&mylock) == 1) {
		//shareddata = 47;
		write_unlock(&mylock);
	}
}

int user_main(int argc, char **argv)
{
	/**
		@Begin
		@Entry_point
		@End
	*/
	thrd_t t1, t2;
	atomic_init(&mylock.lock, RW_LOCK_BIAS);

	thrd_create(&t1, (thrd_start_t)&a, NULL);
	thrd_create(&t2, (thrd_start_t)&b, NULL);

	thrd_join(t1);
	thrd_join(t2);

	return 0;
}