// Linux-style rwlock bias: the counter starts at RW_LOCK_BIAS. Each reader
// subtracts 1 (so counter > 0 means "readers may enter"); a writer subtracts
// the whole bias, which only succeeds cleanly when the counter was at the
// full bias value (no readers, no writer).
7 #define RW_LOCK_BIAS 0x00100000
8 #define WRITE_LOCK_CMP RW_LOCK_BIAS
10 /** Example implementation of linux rw lock along with 2 thread test
15 1. At most 1 thread can acquire the write lock, and at the same time, no
16 other threads can acquire any lock (including read/write lock).
17 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
18 3. A read_unlock release 1 read lock, and a write_unlock release the write
19 lock. They can not release a lock that they don't acquire.
21 4. Read_lock and write_lock can not be grabbed at the same time.
22 5. Happens-before relationship should be checked and guaranteed, which
23 should be as the following:
24 a. read_unlock hb-> write_lock
25 b. write_unlock hb-> write_lock
26 c. write_unlock hb-> read_lock
30 Interesting point for locks:
31 a. If the users unlock() before any lock(), then the model checker will fail.
32 For this case, we cannot say that the data structure is buggy; how can we
33 distinguish such a usage error from a real data structure bug?
34 b. We should specify that for a specific thread, successful locks and
35 unlocks should always come in pairs. We could make this check as an
36 auxiliary check since it is an extra rule for how the interfaces should be called.
43 bool writer_lock_acquired;
46 writer_lock_acquired = false;
50 # Since commit_point_set has no ID attached, A -> B means that for any B,
51 # the previous A happens before B.
52 Read_Unlock -> Write_Lock
53 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
55 Write_Unlock -> Write_Lock
56 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
58 Write_Unlock -> Read_Lock
59 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
// Non-blocking hint: non-zero when a reader could currently acquire the
// lock (counter still positive, i.e. no writer holds it). Relaxed load —
// the answer may be stale by the time the caller acts on it.
70 static inline int read_can_lock(rwlock_t *lock)
72 	return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
// Non-blocking hint: non-zero when a writer could currently acquire the
// lock (counter at full bias, i.e. no readers and no writer). Relaxed
// load — purely advisory, may be stale immediately.
75 static inline int write_can_lock(rwlock_t *lock)
77 	return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
85 Read_Lock_Success_1 | Read_Lock_Success_2
87 !__sequential.writer_lock_acquired
90 __sequential.reader_lock_cnt++;
// Blocking read acquire: optimistically subtract 1 (acquire ordering); a
// prior value > 0 means no writer held the lock and the read lock is ours.
93 static inline void read_lock(rwlock_t *rw)
95 	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
98 	@Commit_point_define_check: __ATOMIC_RET__ > 0
99 	@Label:Read_Lock_Success_1
// Acquisition failed (a writer holds the lock): undo our decrement, spin
// with relaxed loads until the counter looks positive, then retry the
// decrement. Only the fetch_sub that succeeds needs acquire ordering.
102 	while (priorvalue <= 0) {
103 		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
104 		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
107 		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
110 	@Commit_point_define_check: __ATOMIC_RET__ > 0
111 	@Label:Read_Lock_Success_2
120 @Interface: Write_Lock
122 Write_Lock_Success_1 | Write_Lock_Success_2
124 !__sequential.writer_lock_acquired && __sequential.reader_lock_cnt == 0
127 __sequential.writer_lock_acquired = true;
// Blocking write acquire: optimistically subtract the full bias (acquire
// ordering); a prior value equal to RW_LOCK_BIAS means no readers and no
// writer were present, so the write lock is ours.
130 static inline void write_lock(rwlock_t *rw)
132 	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
135 	@Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
136 	@Label: Write_Lock_Success_1
// Acquisition failed (readers or another writer present): restore the
// bias we subtracted, spin with relaxed loads until the counter returns
// to the full bias, then retry the subtraction with acquire ordering.
139 	while (priorvalue != RW_LOCK_BIAS) {
140 		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
141 		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
144 		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
147 	@Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
148 	@Label: Write_Lock_Success_2
156 @Interface: Read_Trylock
160 __sequential.writer_lock_acquired == false
162 HB_Read_Trylock_Succ :: __RET__ == 1
166 __sequential.reader_lock_cnt++;
168 __COND_SAY__ ? __RET__ == 1 : __RET__ == 0
// One-shot read acquire: subtract 1 (acquire ordering) and inspect the
// prior value; on failure roll the counter back with a relaxed add.
// Return paths are outside this excerpt — presumably 1 on success,
// 0 on failure, matching the __RET__ spec above (TODO confirm).
171 static inline int read_trylock(rwlock_t *rw)
173 	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
176 	@Commit_point_define_check: true
177 	@Label:Read_Trylock_Point
// Undo the optimistic decrement — a writer held the lock.
183 	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
189 @Interface: Write_Trylock
193 !__sequential.writer_lock_acquired && __sequential.reader_lock_cnt == 0
195 HB_Write_Trylock_Succ :: __RET__ == 1
199 __sequential.writer_lock_acquired = true;
201 __COND_SAY__ ? __RET__ == 1 : __RET__ == 0
// One-shot write acquire: subtract the full bias (acquire ordering); the
// attempt succeeded only if the prior value was exactly RW_LOCK_BIAS.
// Return paths are outside this excerpt — presumably 1 on success,
// 0 on failure, matching the __RET__ spec above (TODO confirm).
204 static inline int write_trylock(rwlock_t *rw)
206 	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
209 	@Commit_point_define_check: true
210 	@Label: Write_Trylock_Point
213 	if (priorvalue == RW_LOCK_BIAS)
// Undo the optimistic subtraction — readers or a writer were present.
216 	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
222 @Interface: Read_Unlock
223 @Commit_point_set: Read_Unlock_Point
225 __sequential.reader_lock_cnt > 0 && !__sequential.writer_lock_acquired
// Release one read lock: add 1 back to the counter. Release ordering
// makes the critical section visible to the next acquirer (establishes
// the read_unlock hb-> write_lock edge required by the spec).
231 static inline void read_unlock(rwlock_t *rw)
233 	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
236 	@Commit_point_define_check: true
237 	@Label: Read_Unlock_Point
244 @Interface: Write_Unlock
245 @Commit_point_set: Write_Unlock_Point
247 __sequential.reader_lock_cnt == 0 && __sequential.writer_lock_acquired
250 __sequential.writer_lock_acquired = false;
// Release the write lock: restore the full bias. Release ordering makes
// the writer's critical section visible to the next acquirer (the
// write_unlock hb-> read_lock / write_lock edges required by the spec).
254 static inline void write_unlock(rwlock_t *rw)
256 	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
259 	@Commit_point_define_check: true
260 	@Label: Write_Unlock_Point
// Test thread body: two iterations, each doing a read-side critical
// section (load of shareddata) followed by a write-side critical section
// (store of the loop index). The matching read_lock/write_lock calls fall
// outside this excerpt — lock/unlock are presumably paired per iteration.
268 static void a(void *obj)
271 	for(i = 0; i < 2; i++) {
274 		load_32(&shareddata);
275 		read_unlock(&mylock);
278 		store_32(&shareddata,(unsigned int)i);
279 		write_unlock(&mylock);
// Model-checker entry point: initialize the lock counter to the full bias
// (unlocked state) and launch two identical contending threads. Joins and
// the return are outside this excerpt.
284 int user_main(int argc, char **argv)
287 	atomic_init(&mylock.lock, RW_LOCK_BIAS);
289 	thrd_create(&t1, (thrd_start_t)&a, NULL);
290 	thrd_create(&t2, (thrd_start_t)&a, NULL);