+#include <stdio.h>
+#include <threads.h>
+#include <stdatomic.h>
+
+#include <spec_lib.h>
+#include <stdlib.h>
+#include <cdsannotate.h>
+#include <specannotation.h>
+#include <model_memory.h>
+#include "common.h"
+
+#include "librace.h"
+
+#define RW_LOCK_BIAS 0x00100000
+#define WRITE_LOCK_CMP RW_LOCK_BIAS
+
+/* Reader-writer lock encoded in one atomic counter.
+ * lock == RW_LOCK_BIAS : free (no readers, no writer)
+ * each reader subtracts 1; a writer subtracts RW_LOCK_BIAS,
+ * so a successful write hold drives the counter to 0. */
+typedef union {
+ atomic_int lock;
+} rwlock_t;
+
+/** Example implementation of linux rw lock along with 2 thread test
+ * driver... */
+
+/**
+ Properties to check:
+ 1. At most 1 thread can acquire the write lock, and at the same time, no
+ other threads can acquire any lock (including read/write lock).
+ 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
+ 3. A read_unlock release 1 read lock, and a write_unlock release the write
+ lock. They can not release a lock that they don't acquire.
+ ###
+ 4. Read_lock and write_lock can not be grabbed at the same time.
+ 5. Happens-before relationship should be checked and guaranteed, which
+ should be as the following:
+ a. read_unlock hb-> write_lock
+ b. write_unlock hb-> write_lock
+ c. write_unlock hb-> read_lock
+*/
+
+/**
+ Interesting point for locks:
+ a. If the users unlock() before any lock(), then the model checker will fail.
+ For this case, we can not say that the data structure is buggy; how can we
+ distinguish this from a real data structure bug?
+ b. We should specify that for a specific thread, successful locks and
+ unlocks should always come in pairs. We could make this check an
+ auxiliary check since it is an extra rule for how the interfaces should be called.
+*/
+
+/**
+ @Begin
+ @Options:
+ LANG = C;
+ @Global_define:
+ @DeclareVar:
+ bool writer_lock_acquired;
+ int reader_lock_cnt;
+ @InitVar:
+ writer_lock_acquired = false;
+ reader_lock_cnt = 0;
+ @Happens_before:
+ # Since commit_point_set has no ID attached, A -> B means that for any B,
+ # the previous A happens before B.
+ #Read_Unlock -> Read_Lock
+ Read_Unlock -> Write_Lock
+ Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+ #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ Write_Unlock -> Write_Lock
+ Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+
+ Write_Unlock -> Read_Lock
+ Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ @Commutativity: Read_Lock <-> Read_Lock: true
+ @Commutativity: Read_Lock <-> Read_Unlock: true
+ @Commutativity: Read_Lock <-> Read_Trylock: true
+ @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Unlock <-> Read_Unlock: true
+ @Commutativity: Read_Unlock <-> Read_Trylock: true
+ @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Trylock <-> Read_Trylock: true
+ @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
+ @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
+
+ @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
+ @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
+ @End
+*/
+
+/* Advisory probe: nonzero if the counter is positive, i.e. a reader
+ * could currently acquire the lock. Relaxed load — the answer may be
+ * stale by the time the caller acts on it. */
+static inline int read_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
+}
+
+/* Advisory probe: nonzero if the counter equals RW_LOCK_BIAS, i.e. the
+ * lock is completely free (no readers and no writer). Relaxed load —
+ * advisory only, may be stale immediately. */
+static inline int write_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
+}
+
+
+/**
+ @Begin
+ @Interface: Read_Lock
+ @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
+ @Check: !writer_lock_acquired
+ @Action: reader_lock_cnt++;
+ @End
+*/
+/* Blocking read acquire. Optimistically decrements the counter; a
+ * non-positive prior value means a writer holds (or is bidding for) the
+ * lock, so the decrement is rolled back and we spin until the counter
+ * looks positive before retrying. Acquire ordering on the winning RMW
+ * pairs with the release in the unlock paths. */
+static inline void read_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue > 0
+ @Label:Read_Lock_Success_1
+ @End
+ */
+ while (priorvalue <= 0) {
+ /* Lost the race: undo our optimistic decrement, then wait. */
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
+ thrd_yield();
+ }
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue > 0
+ @Label:Read_Lock_Success_2
+ @End
+ */
+ }
+}
+
+
+/**
+ @Begin
+ @Interface: Write_Lock
+ @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
+ @Check: !writer_lock_acquired && reader_lock_cnt == 0
+ @Action: writer_lock_acquired = true;
+ @End
+*/
+/* Blocking write acquire. Subtracts the full bias; only a prior value of
+ * exactly RW_LOCK_BIAS (lock completely free) wins. On failure the bias
+ * is restored and we spin until the lock looks free before retrying.
+ * Acquire ordering on the winning RMW pairs with the unlock releases. */
+static inline void write_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
+ @Label: Write_Lock_Success_1
+ @End
+ */
+ while (priorvalue != RW_LOCK_BIAS) {
+ /* Lost the race: restore the bias we subtracted, then wait. */
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
+ thrd_yield();
+ }
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
+ @Label: Write_Lock_Success_2
+ @End
+ */
+ }
+}
+
+/**
+ @Begin
+ @Interface: Read_Trylock
+ @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
+ //@Condition: writer_lock_acquired == false
+ @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
+ @Action:
+ if (__RET__)
+ reader_lock_cnt++;
+ @Post_check: __RET__ == !writer_lock_acquired || !__RET__
+ @End
+*/
+/* Non-blocking read acquire. Decrements first; if the prior value was
+ * positive the read lock is held and 1 is returned, otherwise the
+ * decrement is rolled back and 0 is returned. */
+static inline int read_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Potential_commit_point_define: true
+ @Label: Potential_Read_Trylock_Point
+ @End
+ */
+ if (priorvalue > 0) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Read_Trylock_Point
+ @Label: Read_Trylock_Succ_Point
+ @End
+ */
+ return 1;
+ }
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Read_Trylock_Point
+ @Label: Read_Trylock_Fail_Point
+ @End
+ */
+ /* Failed: undo the speculative decrement before reporting failure. */
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ return 0;
+}
+
+/**
+ @Begin
+ @Interface: Write_Trylock
+ @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
+ @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
+ @Action:
+ if (__RET__ == 1)
+ writer_lock_acquired = true;
+ @End
+*/
+/* Non-blocking write acquire. Subtracts the full bias; only a prior
+ * value of exactly RW_LOCK_BIAS (lock completely free) succeeds and
+ * returns 1. Otherwise the bias is restored and 0 is returned. */
+static inline int write_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ //model_print("write_trylock: priorvalue: %d\n", priorvalue);
+ /**
+ @Begin
+ @Potential_commit_point_define: true
+ @Label: Potential_Write_Trylock_Point
+ @End
+ */
+ if (priorvalue == RW_LOCK_BIAS) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Write_Trylock_Point
+ @Label: Write_Trylock_Succ_Point
+ @End
+ */
+ return 1;
+ }
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Write_Trylock_Point
+ @Label: Write_Trylock_Fail_Point
+ @End
+ */
+ /* Failed: restore the bias we speculatively subtracted. */
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ return 0;
+}
+
+/**
+ @Begin
+ @Interface: Read_Unlock
+ @Commit_point_set: Read_Unlock_Point
+ @Check: reader_lock_cnt > 0 && !writer_lock_acquired
+ @Action: reader_lock_cnt--;
+ @End
+*/
+/* Release one read hold: add our single share back. Release ordering
+ * makes this reader's critical section visible to later acquirers. */
+static inline void read_unlock(rwlock_t *rw)
+{
+ /********** Admissibility **********/
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
+ /**
+ @Begin
+ @Commit_point_define_check: true
+ @Label: Read_Unlock_Point
+ @End
+ */
+}
+
+/**
+ @Begin
+ @Interface: Write_Unlock
+ @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
+ @Check: reader_lock_cnt == 0 && writer_lock_acquired
+ @Action: writer_lock_acquired = false;
+ @End
+*/
+/* Release the write hold: restore the full bias. Release ordering makes
+ * the writer's critical section visible to later acquirers. */
+static inline void write_unlock(rwlock_t *rw)
+{
+ /********** Admissibility **********/
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
+ /**
+ @Begin
+ @Commit_point_define_check: true
+ @Label: Write_Unlock_Point
+ @End
+ */
+
+ /* NOTE(review): Write_Unlock_Clear is named in the @Commit_point_set
+ * above, yet its defining annotation below is disabled (the //@Begin
+ * line) — confirm this is intentional. */
+ /**
+ //@Begin
+ @Commit_point_clear: true
+ @Label: Write_Unlock_Clear
+ @End
+ */
+}
+
+rwlock_t mylock; /* shared lock under test; initialized in user_main */
+int shareddata; /* datum the lock protects */
+
+/* Writer thread: takes the write lock exclusively and stores 47. */
+static void a(void *obj)
+{
+ write_lock(&mylock);
+ //store_32(&shareddata,(unsigned int)i);
+ shareddata = 47;
+ write_unlock(&mylock);
+}
+
+/* Mixed thread: attempts a non-blocking write (storing 47 if it wins),
+ * then takes and releases the read lock. */
+static void b(void *obj)
+{
+ if (write_trylock(&mylock)) {
+ //store_32(&shareddata,(unsigned int)i);
+ shareddata = 47;
+ write_unlock(&mylock);
+ }
+
+ read_lock(&mylock);
+ //load_32(&shareddata);
+ //printf("%d\n", shareddata);
+ read_unlock(&mylock);
+}
+
+/* Model-checker entry point: initializes the lock to its free value
+ * (RW_LOCK_BIAS), then races writer thread a against trylock/reader
+ * thread b and joins both. */
+int user_main(int argc, char **argv)
+{
+ /**
+ @Begin
+ @Entry_point
+ @End
+ */
+ thrd_t t1, t2;
+ atomic_init(&mylock.lock, RW_LOCK_BIAS);
+
+ thrd_create(&t1, (thrd_start_t)&a, NULL);
+ thrd_create(&t2, (thrd_start_t)&b, NULL);
+
+ thrd_join(t1);
+ thrd_join(t2);
+
+ return 0;
+}