7 #define RW_LOCK_BIAS 0x00100000
8 #define WRITE_LOCK_CMP RW_LOCK_BIAS
10 /** Example implementation of linux rw lock along with 2 thread test
15 1. At most 1 thread can acquire the write lock, and at the same time, no
16 other threads can acquire any lock (including read/write lock).
17 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
18 3. A read_unlock release 1 read lock, and a write_unlock release the write
19 lock. They can not release a lock that they don't acquire.
21 4. Read_lock and write_lock can not be grabbed at the same time.
22 5. Happens-before relationship should be checked and guaranteed, which
23 should be as the following:
24 a. read_unlock hb-> write_lock
25 b. write_unlock hb-> write_lock
26 c. write_unlock hb-> read_lock
30 Interesting point for locks:
31 a. If the users unlock() before any lock(), then the model checker will fail.
32 For this case, we can not say that the data structure itself is buggy; how
33 can we distinguish such a usage error from a real data structure bug?
34 b. We should specify that for a specific thread, successful locks and
35 unlocks should always come in pairs. We could make this check as an
36 auxiliary check since it is an extra rule for how the interfaces should be called.
42 bool __writer_lock_acquired = false;
43 int __reader_lock_cnt = 0;
46 # Since commit_point_set has no ID attached, A -> B means that for any B,
47 # the previous A happens before B.
48 Read_Unlock -> Write_Lock
49 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
51 Write_Unlock -> Write_Lock
52 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
54 Write_Unlock -> Read_Lock
55 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
/* Advisory check: nonzero iff a read lock could be acquired right now,
 * i.e. the counter is still positive (no writer holds the lock and fewer
 * than RW_LOCK_BIAS readers are in). Relaxed load — the answer may be
 * stale by the time the caller acts on it. */
66 static inline int read_can_lock(rwlock_t *lock)
68 return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
/* Advisory check: nonzero iff the write lock could be acquired right now,
 * i.e. the counter equals the full bias (no readers, no writer). Relaxed
 * load — purely a hint, not a synchronization point. */
71 static inline int write_can_lock(rwlock_t *lock)
73 return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
81 Read_Lock_Success_1 | Read_Lock_Success_2
83 !__writer_lock_acquired
/* Blocking read-lock. Optimistically decrement the counter; a positive
 * prior value means no writer was present and the read lock is ours
 * (commit points Read_Lock_Success_1/2). The acquire order pairs with the
 * release in write_unlock to establish the write_unlock -> read_lock
 * happens-before edge required by the spec above. */
89 static inline void read_lock(rwlock_t *rw)
91 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
94 @Commit_point_define_check: __ATOMIC_RET__ > 0
95 @Label:Read_Lock_Success_1
/* Slow path: a writer held the lock (prior value <= 0). Undo our
 * decrement — relaxed is sufficient since we acquired nothing — then spin
 * until the counter looks positive and retry the decrement. */
98 while (priorvalue <= 0) {
99 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
100 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
103 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
106 @Commit_point_define_check: __ATOMIC_RET__ > 0
107 @Label:Read_Lock_Success_2
116 @Interface: Write_Lock
118 Write_Lock_Success_1 | Write_Lock_Success_2
120 !__writer_lock_acquired && __reader_lock_cnt == 0
123 __writer_lock_acquired = true;
/* Blocking write-lock. Subtract the full bias; a prior value of exactly
 * RW_LOCK_BIAS means the lock was completely free (no readers, no writer)
 * and the write lock is ours (commit points Write_Lock_Success_1/2). The
 * acquire order pairs with the releases in read_unlock/write_unlock. */
126 static inline void write_lock(rwlock_t *rw)
128 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
131 @Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
132 @Label: Write_Lock_Success_1
/* Slow path: lock was held by readers or another writer. Restore the bias
 * we subtracted (relaxed — nothing was acquired), spin until the counter
 * reads as fully free, and retry the subtraction. */
135 while (priorvalue != RW_LOCK_BIAS) {
136 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
137 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
140 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
143 @Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
144 @Label: Write_Lock_Success_2
152 @Interface: Read_Trylock
156 __writer_lock_acquired == false
158 HB_Read_Trylock_Succ :: __RET__ == 1
164 COND_SAT ? __RET__ == 1 : __RET__ == 0
/* Non-blocking read-lock attempt. Per the spec above, returns 1 on
 * success and 0 on failure (the return statements are not visible in this
 * excerpt). A positive prior value means the lock was acquired. */
167 static inline int read_trylock(rwlock_t *rw)
169 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
172 @Commit_point_define_check: true
173 @Label:Read_Trylock_Point
/* Failure path (prior value <= 0, a writer was present): roll back the
 * speculative decrement and report failure instead of spinning. */
179 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
185 @Interface: Write_Trylock
189 !__writer_lock_acquired && __reader_lock_cnt == 0
191 HB_Write_Trylock_Succ :: __RET__ == 1
195 __writer_lock_acquired = true;
197 COND_SAT ? __RET__ == 1 : __RET__ == 0
/* Non-blocking write-lock attempt. Per the spec above, returns 1 on
 * success and 0 on failure (the return statements are not visible in this
 * excerpt). Success requires the prior value to be the full bias, i.e.
 * the lock was entirely free. */
200 static inline int write_trylock(rwlock_t *rw)
202 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
205 @Commit_point_define_check: true
206 @Label: Write_Trylock_Point
209 if (priorvalue == RW_LOCK_BIAS)
/* Failure path: restore the bias we speculatively subtracted. */
212 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
218 @Interface: Read_Unlock
219 @Commit_point_set: Read_Unlock_Point
221 __reader_lock_cnt > 0 && !__writer_lock_acquired
/* Release one read lock: add 1 back to the counter. The release order
 * publishes this reader's critical section to any later acquiring
 * write_lock/write_trylock (the read_unlock -> write_lock happens-before
 * edge in the spec above). Must only be called by a thread that holds a
 * read lock — the spec's check condition enforces this. */
227 static inline void read_unlock(rwlock_t *rw)
229 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
232 @Commit_point_define_check: true
233 @Label: Read_Unlock_Point
240 @Interface: Write_Unlock
241 @Commit_point_set: Write_Unlock_Point
243 __reader_lock_cnt == 0 && __writer_lock_acquired
246 __writer_lock_acquired = false;
/* Release the write lock: add the full bias back, returning the counter
 * to its free value. The release order publishes the writer's critical
 * section to later acquiring read_lock/write_lock (the write_unlock ->
 * read_lock / write_lock happens-before edges in the spec above). Must
 * only be called by the thread that holds the write lock. */
250 static inline void write_unlock(rwlock_t *rw)
252 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
255 @Commit_point_define_check: true
256 @Label: Write_Unlock_Point
/* Test thread body: two iterations, each apparently a read-locked load of
 * shareddata followed by a write-locked store (the read_lock/write_lock
 * calls fall in lines elided from this excerpt — TODO confirm they pair
 * with the unlocks shown below). */
264 static void a(void *obj)
267 for(i = 0; i < 2; i++) {
270 load_32(&shareddata);
271 read_unlock(&mylock);
274 store_32(&shareddata,(unsigned int)i);
275 write_unlock(&mylock);
/* Model-checker entry point: initialize the lock to its free value
 * (RW_LOCK_BIAS) and launch two threads running a(); both contend on
 * mylock/shareddata. (Thread joins, if any, are beyond this excerpt.) */
280 int user_main(int argc, char **argv)
283 atomic_init(&mylock.lock, RW_LOCK_BIAS);
285 thrd_create(&t1, (thrd_start_t)&a, NULL);
286 thrd_create(&t2, (thrd_start_t)&a, NULL);