[cdsspec-compiler.git] / benchmark / linuxrwlocks / linuxrwlocks.c
#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

typedef union {
        atomic_int lock;
} rwlock_t;
/** Example implementation of the Linux rwlock along with a two-thread test
 *  driver. */

/**
        Properties to check:
        1. At most one thread can hold the write lock, and while it does, no
                other thread can acquire any lock (read or write).
        2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
        3. A read_unlock releases one read lock, and a write_unlock releases the
        write lock. Neither can release a lock that was not acquired.
        ###
        4. The read lock and the write lock cannot be held at the same time.
        5. The following happens-before relationships should be checked and
        guaranteed:
                a. read_unlock hb-> write_lock
                b. write_unlock hb-> write_lock
                c. write_unlock hb-> read_lock
*/

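/*
 * Illustrative sketch (not part of the original benchmark): the single
 * atomic counter encodes the whole lock state. It starts at RW_LOCK_BIAS;
 * each reader subtracts 1 and a writer subtracts the full bias, which is
 * why at most RW_LOCK_BIAS readers can hold the lock at once. The helper
 * name below is hypothetical and only decodes the counter for explanation.
 */
static inline const char *rwlock_state_sketch(rwlock_t *lock)
{
        int v = atomic_load_explicit(&lock->lock, memory_order_relaxed);
        if (v == RW_LOCK_BIAS)
                return "unlocked";
        if (v > 0)
                return "read-locked";   /* RW_LOCK_BIAS - v readers hold it */
        return "write-locked or writer contending";
}
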
/**
        Interesting points for locks:
        a. If a user calls unlock() before any lock(), the model checker will
        report a failure. In that case the data structure itself is not buggy;
        how do we distinguish such a misuse from a real data structure bug?
        b. We should specify that, for a specific thread, successful locks and
        unlocks always come in pairs. We could make this an auxiliary check,
        since it is an extra rule for how the interfaces should be called.
*/

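/*
 * A minimal sketch of the auxiliary pairing check mentioned in (b) above
 * (not part of the original benchmark): keep a per-thread balance of
 * successful read locks and flag an unlock with no matching lock. The
 * _Thread_local counter and the helper names are hypothetical; a caller
 * would invoke them next to the real read_lock()/read_unlock() calls.
 */
#include <assert.h>

static _Thread_local int read_balance_sketch;

static inline void note_read_lock_sketch(void)
{
        read_balance_sketch++;
}

static inline void note_read_unlock_sketch(void)
{
        /* Unlocking without a prior successful lock violates the pairing rule. */
        assert(read_balance_sketch > 0);
        read_balance_sketch--;
}
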
/**
        @Begin
        @Global_define:
                @DeclareVar:
                        bool writer_lock_acquired;
                        int reader_lock_cnt;
                @InitVar:
                        writer_lock_acquired = false;
                        reader_lock_cnt = 0;
        @Happens_before:
                # Since commit_point_set has no ID attached, A -> B means that for any B,
                # the previous A happens before B.
                Read_Unlock -> Write_Lock
                Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

                Write_Unlock -> Write_Lock
                Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

                Write_Unlock -> Read_Lock
                Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
        @End
*/

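/*
 * Minimal sketch of how the edges above are expected to arise (an
 * illustration, not part of the original spec): each unlock performs a
 * release RMW and each lock/trylock performs an acquire RMW on the same
 * atomic counter. For example, the Write_Unlock -> Read_Lock edge
 * corresponds to the pairing
 *
 *     atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);  // in write_unlock
 *     atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);             // in read_lock
 *
 * When the acquire operation reads the value written by the release (or a
 * later value in its release sequence), the two synchronize, so writes made
 * while holding the write lock become visible to the reader.
 */
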
static inline int read_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
        return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}

/**
        @Begin
        @Interface: Read_Lock
        @Commit_point_set:
                Read_Lock_Success_1 | Read_Lock_Success_2
        @Check:
                !writer_lock_acquired
        @Action:
                reader_lock_cnt++;
        @End
*/
static inline void read_lock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue > 0
                @Label:Read_Lock_Success_1
                @End
        */
        while (priorvalue <= 0) {
                /* A writer holds (or is acquiring) the lock: undo our decrement,
                   spin until the counter looks positive again, then retry. */
                atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue > 0
                        @Label:Read_Lock_Success_2
                        @End
                */
        }
}

/**
        @Begin
        @Interface: Write_Lock
        @Commit_point_set:
                Write_Lock_Success_1 | Write_Lock_Success_2
        @Check:
                !writer_lock_acquired && reader_lock_cnt == 0
        @Action:
                writer_lock_acquired = true;
        @End
*/
static inline void write_lock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                @Label: Write_Lock_Success_1
                @End
        */
        while (priorvalue != RW_LOCK_BIAS) {
                /* The lock was not free (readers or another writer hold it): undo
                   our subtraction, spin until the counter returns to RW_LOCK_BIAS,
                   then retry. */
                atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
                while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
                        thrd_yield();
                }
                priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
                /**
                        @Begin
                        @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
                        @Label: Write_Lock_Success_2
                        @End
                */
        }
}

/**
        @Begin
        @Interface: Read_Trylock
        @Commit_point_set:
                Read_Trylock_Point
        @Condition:
                writer_lock_acquired == false
        @HB_condition:
                HB_Read_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        reader_lock_cnt++;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
static inline int read_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label:Read_Trylock_Point
                @End
        */
        if (priorvalue > 0)
                return 1;

        /* Failed: a writer holds or is acquiring the lock, so undo the tentative decrement. */
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Write_Trylock
        @Commit_point_set:
                Write_Trylock_Point
        @Condition:
                !writer_lock_acquired && reader_lock_cnt == 0
        @HB_condition:
                HB_Write_Trylock_Succ :: __RET__ == 1
        @Action:
                if (__COND_SAT__)
                        writer_lock_acquired = true;
        @Post_check:
                __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
        @End
*/
static inline int write_trylock(rwlock_t *rw)
{
        int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Write_Trylock_Point
                @End
        */
        if (priorvalue == RW_LOCK_BIAS)
                return 1;

        /* Failed: the lock was not free, so undo the tentative subtraction. */
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
        return 0;
}

/**
        @Begin
        @Interface: Read_Unlock
        @Commit_point_set: Read_Unlock_Point
        @Check:
                reader_lock_cnt > 0 && !writer_lock_acquired
        @Action:
                reader_lock_cnt--;
        @End
*/
static inline void read_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Read_Unlock_Point
                @End
        */
}

/**
        @Begin
        @Interface: Write_Unlock
        @Commit_point_set: Write_Unlock_Point
        @Check:
                reader_lock_cnt == 0 && writer_lock_acquired
        @Action:
                writer_lock_acquired = false;
        @End
*/
static inline void write_unlock(rwlock_t *rw)
{
        atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
        /**
                @Begin
                @Commit_point_define_check: true
                @Label: Write_Unlock_Point
                @End
        */
}

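/*
 * Hedged usage sketch for the trylock interfaces (not part of the original
 * test driver; the function name and parameters are hypothetical). Per the
 * @Post_check annotations above, both trylocks return 1 on success and 0 on
 * failure, and only a successful trylock may be paired with an unlock.
 */
static void trylock_usage_sketch(rwlock_t *rw, int *data)
{
        /* Reader: skip the read entirely if a writer holds the lock. */
        if (read_trylock(rw)) {
                load_32(data);
                read_unlock(rw);
        }

        /* Writer: retry with a yield until the try-acquire succeeds. */
        while (!write_trylock(rw))
                thrd_yield();
        store_32(data, 1);
        write_unlock(rw);
}
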
rwlock_t mylock;
int shareddata;

static void a(void *obj)
{
        int i;
        /* Alternate between a read-locked load and a write-locked store. */
        for (i = 0; i < 2; i++) {
                if ((i % 2) == 0) {
                        read_lock(&mylock);
                        load_32(&shareddata);
                        read_unlock(&mylock);
                } else {
                        write_lock(&mylock);
                        store_32(&shareddata, (unsigned int)i);
                        write_unlock(&mylock);
                }
        }
}

int user_main(int argc, char **argv)
{
        /**
                @Begin
                @Entry_point
                @End
        */
        thrd_t t1, t2;
        atomic_init(&mylock.lock, RW_LOCK_BIAS);

        thrd_create(&t1, (thrd_start_t)&a, NULL);
        thrd_create(&t2, (thrd_start_t)&a, NULL);

        thrd_join(t1);
        thrd_join(t2);

        return 0;
}