7 #include <cdsannotate.h>
8 #include <specannotation.h>
9 #include <model_memory.h>
14 #define RW_LOCK_BIAS 0x00100000
15 #define WRITE_LOCK_CMP RW_LOCK_BIAS
21 /** Example implementation of linux rw lock along with 2 thread test
26 1. At most 1 thread can acquire the write lock, and at the same time, no
27 other threads can acquire any lock (including read/write lock).
28 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
29 3. A read_unlock release 1 read lock, and a write_unlock release the write
30 lock. They can not release a lock that they don't acquire.
32 4. Read_lock and write_lock can not be grabbed at the same time.
33 5. Happens-before relationship should be checked and guaranteed, which
34 should be as the following:
35 a. read_unlock hb-> write_lock
36 b. write_unlock hb-> write_lock
37 c. write_unlock hb-> read_lock
41 Interesting point for locks:
42 a. If the users unlock() before any lock(), then the model checker will fail.
43 For this case, we cannot say that the data structure is buggy; how can we
44 distinguish this situation from a real data structure bug?
45 b. We should specify that for a specific thread, successful locks and
46 unlocks should always come in pairs. We could make this check as an
47 auxiliary check since it is an extra rule for how the interfaces should be called.
56 bool writer_lock_acquired;
59 writer_lock_acquired = false;
62 # Since commit_point_set has no ID attached, A -> B means that for any B,
63 # the previous A happens before B.
64 Read_Unlock -> Read_Lock
65 Read_Unlock -> Write_Lock
66 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
67 Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
69 Write_Unlock -> Write_Lock
70 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
72 Write_Unlock -> Read_Lock
73 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
77 static inline int read_can_lock(rwlock_t *lock)
79 return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
82 static inline int write_can_lock(rwlock_t *lock)
84 return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
91 @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
92 @Check: !writer_lock_acquired
93 @Action: reader_lock_cnt++;
96 static inline void read_lock(rwlock_t *rw)
98 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
101 @Commit_point_define_check: priorvalue > 0
102 @Label:Read_Lock_Success_1
105 while (priorvalue <= 0) {
106 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
107 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
110 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
113 @Commit_point_define_check: priorvalue > 0
114 @Label:Read_Lock_Success_2
123 @Interface: Write_Lock
124 @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
125 @Check: !writer_lock_acquired && reader_lock_cnt == 0
126 @Action: writer_lock_acquired = true;
129 static inline void write_lock(rwlock_t *rw)
131 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
134 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
135 @Label: Write_Lock_Success_1
138 while (priorvalue != RW_LOCK_BIAS) {
139 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
140 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
143 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
146 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
147 @Label: Write_Lock_Success_2
155 @Interface: Read_Trylock
156 @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
157 //@Condition: writer_lock_acquired == false
158 @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
162 @Post_check: __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
165 static inline int read_trylock(rwlock_t *rw)
167 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
170 @Potential_commit_point_define: true
171 @Label: Potential_Read_Trylock_Point
174 if (priorvalue > 0) {
177 @Commit_point_define: true
178 @Potential_commit_point_label: Potential_Read_Trylock_Point
179 @Label: Read_Trylock_Succ_Point
186 @Commit_point_define: true
187 @Potential_commit_point_label: Potential_Read_Trylock_Point
188 @Label: Read_Trylock_Fail_Point
191 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
197 @Interface: Write_Trylock
198 @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
199 @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
202 writer_lock_acquired = true;
205 static inline int write_trylock(rwlock_t *rw)
207 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
208 //model_print("write_trylock: priorvalue: %d\n", priorvalue);
211 @Potential_commit_point_define: true
212 @Label: Potential_Write_Trylock_Point
215 if (priorvalue == RW_LOCK_BIAS) {
218 @Commit_point_define: true
219 @Potential_commit_point_label: Potential_Write_Trylock_Point
220 @Label: Write_Trylock_Succ_Point
227 @Commit_point_define: true
228 @Potential_commit_point_label: Potential_Write_Trylock_Point
229 @Label: Write_Trylock_Fail_Point
232 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
238 @Interface: Read_Unlock
239 @Commit_point_set: Read_Unlock_Point
240 @Check: reader_lock_cnt > 0 && !writer_lock_acquired
241 @Action: reader_lock_cnt--;
244 static inline void read_unlock(rwlock_t *rw)
246 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
249 @Commit_point_define_check: true
250 @Label: Read_Unlock_Point
257 @Interface: Write_Unlock
258 @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
259 @Check: reader_lock_cnt == 0 && writer_lock_acquired
260 @Action: writer_lock_acquired = false;
263 static inline void write_unlock(rwlock_t *rw)
265 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
268 @Commit_point_define_check: true
269 @Label: Write_Unlock_Point
275 @Commit_point_clear: true
276 @Label: Write_Unlock_Clear
284 static void a(void *obj)
287 for(i = 0; i < 2; i++) {
290 //load_32(&shareddata);
291 //printf("%d\n", shareddata);
292 read_unlock(&mylock);
295 //store_32(&shareddata,(unsigned int)i);
297 write_unlock(&mylock);
302 static void b(void *obj)
305 for(i = 0; i < 2; i++) {
307 if (read_trylock(&mylock) == 1) {
308 //printf("%d\n", shareddata);
309 read_unlock(&mylock);
312 if (write_trylock(&mylock) == 1) {
314 write_unlock(&mylock);
320 int user_main(int argc, char **argv)
328 atomic_init(&mylock.lock, RW_LOCK_BIAS);
330 thrd_create(&t1, (thrd_start_t)&a, NULL);
331 thrd_create(&t2, (thrd_start_t)&b, NULL);