/* Unlocked value of the lock counter: no readers, no writer.
   Each reader subtracts 1; a writer subtracts the entire bias, so the
   counter is RW_LOCK_BIAS only when the lock is completely free. */
#define RW_LOCK_BIAS 0x00100000
/* Value the counter must hold for a write lock to be obtainable. */
#define WRITE_LOCK_CMP RW_LOCK_BIAS
14 /** Example implementation of linux rw lock along with 2 thread test
19 1. At most 1 thread can acquire the write lock, and at the same time, no
20 other threads can acquire any lock (including read/write lock).
21 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
22 3. A read_unlock release 1 read lock, and a write_unlock release the write
23 lock. They can not release a lock that they don't acquire.
25 4. Read_lock and write_lock can not be grabbed at the same time.
5. Happens-before relationships should be checked and guaranteed, which
27 should be as the following:
28 a. read_unlock hb-> write_lock
29 b. write_unlock hb-> write_lock
30 c. write_unlock hb-> read_lock
34 Interesting point for locks:
35 a. If the users unlock() before any lock(), then the model checker will fail.
For this case, we cannot say that the data structure itself is buggy; how do we
distinguish such API misuse from a real data structure bug?
38 b. We should specify that for a specific thread, successful locks and
39 unlocks should always come in pairs. We could make this check as an
auxiliary check since it is an extra rule for how the interfaces should be called.
47 bool writer_lock_acquired;
50 writer_lock_acquired = false;
53 # Since commit_point_set has no ID attached, A -> B means that for any B,
54 # the previous A happens before B.
55 Write_Unlock -> Write_Lock
56 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
58 Write_Unlock -> Read_Lock
59 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
63 static inline int read_can_lock(rwlock_t *lock)
65 return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
68 static inline int write_can_lock(rwlock_t *lock)
70 return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
78 Read_Lock_Success_1 | Read_Lock_Success_2
85 static inline void read_lock(rwlock_t *rw)
87 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
90 @Commit_point_define_check: priorvalue > 0
91 @Label:Read_Lock_Success_1
94 while (priorvalue <= 0) {
95 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
96 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
99 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
102 @Commit_point_define_check: priorvalue > 0
103 @Label:Read_Lock_Success_2
112 @Interface: Write_Lock
114 Write_Lock_Success_1 | Write_Lock_Success_2
116 !writer_lock_acquired && reader_lock_cnt == 0
118 writer_lock_acquired = true;
121 static inline void write_lock(rwlock_t *rw)
123 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
126 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
127 @Label: Write_Lock_Success_1
130 while (priorvalue != RW_LOCK_BIAS) {
131 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
132 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
135 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
138 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
139 @Label: Write_Lock_Success_2
147 @Interface: Read_Trylock
149 Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
151 writer_lock_acquired == false
153 HB_Read_Trylock_Succ :: __RET__ == 1
158 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
161 static inline int read_trylock(rwlock_t *rw)
163 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
166 @Potential_commit_point_define: true
167 @Label: Potential_Read_Trylock_Point
170 if (priorvalue > 0) {
173 @Commit_point_define: true
174 @Potential_commit_point_label: Potential_Read_Trylock_Point
175 @Label: Read_Trylock_Succ_Point
182 @Commit_point_define: true
183 @Potential_commit_point_label: Potential_Read_Trylock_Point
184 @Label: Read_Trylock_Fail_Point
187 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
193 @Interface: Write_Trylock
195 Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
197 !writer_lock_acquired && reader_lock_cnt == 0
199 HB_Write_Trylock_Succ :: __RET__ == 1
202 writer_lock_acquired = true;
204 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
207 static inline int write_trylock(rwlock_t *rw)
209 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
212 @Potential_commit_point_define: true
213 @Label: Potential_Write_Trylock_Point
216 if (priorvalue == RW_LOCK_BIAS) {
219 @Commit_point_define: true
220 @Potential_commit_point_label: Potential_Write_Trylock_Point
221 @Label: Write_Trylock_Succ_Point
228 @Commit_point_define: true
229 @Potential_commit_point_label: Potential_Write_Trylock_Point
230 @Label: Write_Trylock_Fail_Point
233 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
239 @Interface: Read_Unlock
240 @Commit_point_set: Read_Unlock_Point
242 reader_lock_cnt > 0 && !writer_lock_acquired
247 static inline void read_unlock(rwlock_t *rw)
249 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
252 @Commit_point_define_check: true
253 @Label: Read_Unlock_Point
260 @Interface: Write_Unlock
261 @Commit_point_set: Write_Unlock_Point
263 reader_lock_cnt == 0 && writer_lock_acquired
265 writer_lock_acquired = false;
268 static inline void write_unlock(rwlock_t *rw)
270 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
273 @Commit_point_define_check: true
274 @Label: Write_Unlock_Point
282 static void a(void *obj)
285 for(i = 0; i < 2; i++) {
288 load_32(&shareddata);
289 read_unlock(&mylock);
292 store_32(&shareddata,(unsigned int)i);
293 write_unlock(&mylock);
298 int user_main(int argc, char **argv)
306 atomic_init(&mylock.lock, RW_LOCK_BIAS);
308 thrd_create(&t1, (thrd_start_t)&a, NULL);
309 thrd_create(&t2, (thrd_start_t)&a, NULL);