7 #include <cdsannotate.h>
8 #include <specannotation.h>
9 #include <model_memory.h>
14 #define RW_LOCK_BIAS 0x00100000
15 #define WRITE_LOCK_CMP RW_LOCK_BIAS
21 /** Example implementation of linux rw lock along with 2 thread test
26 1. At most 1 thread can acquire the write lock, and at the same time, no
27 other threads can acquire any lock (including read/write lock).
28 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
3. A read_unlock releases one read lock, and a write_unlock releases the write
lock. They cannot release a lock that they did not acquire.
32 4. Read_lock and write_lock can not be grabbed at the same time.
5. Happens-before relationships should be checked and guaranteed, which
should be as follows:
35 a. read_unlock hb-> write_lock
36 b. write_unlock hb-> write_lock
37 c. write_unlock hb-> read_lock
41 Interesting point for locks:
a. If a user calls unlock() before any lock(), then the model checker will fail.
In that case we cannot say the data structure itself is buggy — how can we
distinguish such a usage error from a real data-structure bug?
b. We should specify that, for a specific thread, successful locks and
unlocks should always come in pairs. We could make this an auxiliary
check, since it is an extra rule for how the interfaces should be called.
56 bool writer_lock_acquired;
59 writer_lock_acquired = false;
62 # Since commit_point_set has no ID attached, A -> B means that for any B,
63 # the previous A happens before B.
64 #Read_Unlock -> Read_Lock
65 Read_Unlock -> Write_Lock
66 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
67 #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
69 Write_Unlock -> Write_Lock
70 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
72 Write_Unlock -> Read_Lock
73 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
75 @Commutativity: Read_Lock <-> Read_Lock: true
76 @Commutativity: Read_Lock <-> Read_Unlock: true
77 @Commutativity: Read_Lock <-> Read_Trylock: true
78 @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
80 @Commutativity: Read_Unlock <-> Read_Unlock: true
81 @Commutativity: Read_Unlock <-> Read_Trylock: true
82 @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
84 @Commutativity: Read_Trylock <-> Read_Trylock: true
85 @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
86 @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
87 @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
89 @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
90 @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
91 @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
/* Hint: nonzero when a reader could acquire the lock right now, i.e. the
 * counter is still positive (no writer holds it and fewer than RW_LOCK_BIAS
 * readers do).  The relaxed load means the answer may already be stale by
 * the time the caller acts on it — this is advisory only. */
static inline int read_can_lock(rwlock_t *lock)
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
/* Hint: nonzero when a writer could acquire the lock right now, i.e. the
 * counter equals RW_LOCK_BIAS (no readers and no writer hold it).  Relaxed
 * load — advisory only, may be stale when the caller acts on it. */
static inline int write_can_lock(rwlock_t *lock)
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
108 @Interface: Read_Lock
109 @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
110 @Check: !writer_lock_acquired
111 @Action: reader_lock_cnt++;
/* Read_Lock: speculatively decrement the counter by 1 with acquire ordering.
 * A positive prior value means the read lock was obtained (spec commit points
 * below).  Otherwise undo the decrement, spin with relaxed loads until the
 * counter looks positive, and retry the acquiring decrement.
 * NOTE(review): the closing braces of both retry loops are elided from this
 * view of the file. */
static inline void read_lock(rwlock_t *rw)
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/* Fast path: committed iff no writer held the lock (counter was > 0). */
	@Commit_point_define_check: priorvalue > 0
	@Label:Read_Lock_Success_1
	while (priorvalue <= 0) {
		/* Roll back our speculative decrement before spinning; relaxed is
		 * sufficient because this add does not publish anything. */
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		/* Wait until the lock looks acquirable before retrying. */
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
		/* Retry path: committed once the decrement saw a positive counter. */
	@Commit_point_define_check: priorvalue > 0
	@Label:Read_Lock_Success_2
141 @Interface: Write_Lock
142 @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
143 @Check: !writer_lock_acquired && reader_lock_cnt == 0
144 @Action: writer_lock_acquired = true;
/* Write_Lock: speculatively subtract the whole RW_LOCK_BIAS with acquire
 * ordering.  Seeing exactly RW_LOCK_BIAS as the prior value means the lock
 * was completely free (no readers, no writer) and the write lock is taken.
 * Otherwise undo the subtraction, spin with relaxed loads until the counter
 * equals RW_LOCK_BIAS again, and retry.
 * NOTE(review): the closing braces of both retry loops are elided from this
 * view of the file. */
static inline void write_lock(rwlock_t *rw)
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/* Fast path: committed iff the lock was entirely free. */
	@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
	@Label: Write_Lock_Success_1
	while (priorvalue != RW_LOCK_BIAS) {
		/* Roll back our speculative subtraction before spinning. */
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		/* Wait until the lock looks completely free before retrying. */
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
		/* Retry path: committed once the subtraction saw a free lock. */
	@Commit_point_define_check: priorvalue == RW_LOCK_BIAS
	@Label: Write_Lock_Success_2
173 @Interface: Read_Trylock
174 @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
175 //@Condition: writer_lock_acquired == false
176 @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
180 @Post_check: __RET__ == !writer_lock_acquired || !__RET__
/* Read_Trylock: one-shot attempt.  Decrement the counter by 1 with acquire
 * ordering; a positive prior value means the read lock was obtained.  On
 * failure the decrement is rolled back (relaxed add below).
 * NOTE(review): the return statements and closing braces are elided from
 * this view — per the @HB_condition above, success is presumably reported
 * as __RET__ == 1; confirm against the full file. */
static inline int read_trylock(rwlock_t *rw)
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/* The single atomic RMW is the only potential commit point; whether it
	 * commits as success or failure is decided by the branch below. */
	@Potential_commit_point_define: true
	@Label: Potential_Read_Trylock_Point
	if (priorvalue > 0) {
	/* Success: no writer held the lock when we decremented. */
	@Commit_point_define: true
	@Potential_commit_point_label: Potential_Read_Trylock_Point
	@Label: Read_Trylock_Succ_Point
	/* Failure: the earlier decrement is retroactively the fail commit point. */
	@Commit_point_define: true
	@Potential_commit_point_label: Potential_Read_Trylock_Point
	@Label: Read_Trylock_Fail_Point
	/* Undo the speculative decrement; nothing to publish, so relaxed. */
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
215 @Interface: Write_Trylock
216 @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
217 @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
220 writer_lock_acquired = true;
/* Write_Trylock: one-shot attempt.  Subtract the whole RW_LOCK_BIAS with
 * acquire ordering; a prior value of exactly RW_LOCK_BIAS means the lock was
 * completely free and the write lock was obtained.  On failure the
 * subtraction is rolled back (relaxed add below).
 * NOTE(review): the return statements and closing braces are elided from
 * this view — per the @HB_condition above, success is presumably reported
 * as __RET__ == 1; confirm against the full file. */
static inline int write_trylock(rwlock_t *rw)
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	//model_print("write_trylock: priorvalue: %d\n", priorvalue);
	/* The single atomic RMW is the only potential commit point; the branch
	 * below decides whether it commits as success or failure. */
	@Potential_commit_point_define: true
	@Label: Potential_Write_Trylock_Point
	if (priorvalue == RW_LOCK_BIAS) {
	/* Success: the lock was entirely free (no readers, no writer). */
	@Commit_point_define: true
	@Potential_commit_point_label: Potential_Write_Trylock_Point
	@Label: Write_Trylock_Succ_Point
	/* Failure: the earlier subtraction is retroactively the fail commit point. */
	@Commit_point_define: true
	@Potential_commit_point_label: Potential_Write_Trylock_Point
	@Label: Write_Trylock_Fail_Point
	/* Undo the speculative subtraction; nothing to publish, so relaxed. */
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
256 @Interface: Read_Unlock
257 @Commit_point_set: Read_Unlock_Point
258 @Check: reader_lock_cnt > 0 && !writer_lock_acquired
259 @Action: reader_lock_cnt--;
/* Read_Unlock: return one reader's share to the counter.  The release
 * ordering pairs with the acquire RMWs in the lock/trylock paths, giving the
 * read_unlock -> write_lock happens-before edges listed in the header spec.
 * The caller must actually hold a read lock (checked by @Check above). */
static inline void read_unlock(rwlock_t *rw)
	/********** Admissibility **********/
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
	/* The releasing increment is unconditionally the commit point. */
	@Commit_point_define_check: true
	@Label: Read_Unlock_Point
276 @Interface: Write_Unlock
277 @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
278 @Check: reader_lock_cnt == 0 && writer_lock_acquired
279 @Action: writer_lock_acquired = false;
/* Write_Unlock: restore the full RW_LOCK_BIAS, making the lock completely
 * free again.  The release ordering pairs with the acquire RMWs in the
 * lock/trylock paths, giving the write_unlock -> read_lock/write_lock
 * happens-before edges listed in the header spec.  The caller must actually
 * hold the write lock (checked by @Check above). */
static inline void write_unlock(rwlock_t *rw)
	/********** Admissibility **********/
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
	/* The releasing add is unconditionally the commit point... */
	@Commit_point_define_check: true
	@Label: Write_Unlock_Point
	/* ...and it also clears any pending commit state for this interface. */
	@Commit_point_clear: true
	@Label: Write_Unlock_Clear
/* Test thread 1 (writer): exercises the write-lock/unlock pair on the global
 * mylock.  NOTE(review): the matching write_lock(&mylock) call and the shared
 * data store are elided from this view — confirm against the full file that
 * the unlock below is preceded by a successful write_lock. */
static void a(void *obj)
	//store_32(&shareddata,(unsigned int)i);
	write_unlock(&mylock);
/* Test thread 2 (reader): tries to take the read lock without blocking and,
 * only on success, releases it again — so lock/unlock stay paired even when
 * the trylock loses the race to the writer thread. */
static void b(void *obj)
	if (read_trylock(&mylock)) {
		//load_32(&shareddata);
		//printf("%d\n", shareddata);
		read_unlock(&mylock);
321 int user_main(int argc, char **argv)
329 atomic_init(&mylock.lock, RW_LOCK_BIAS);
331 thrd_create(&t1, (thrd_start_t)&a, NULL);
332 thrd_create(&t2, (thrd_start_t)&b, NULL);