add yield support
[model-checker.git] / test / linuxrwlocksyield.c
1 #include <stdio.h>
2 #include <threads.h>
3 #include <stdatomic.h>
4
5 #include "librace.h"
6
7 #define RW_LOCK_BIAS            0x00100000
8 #define WRITE_LOCK_CMP          RW_LOCK_BIAS
9
10 /** Example implementation of linux rw lock along with 2 thread test
11  *  driver... */
12
/* Reader-writer lock state: one atomic counter, initialized to
 * RW_LOCK_BIAS (see user_main).  Each reader subtracts 1; a writer
 * subtracts the whole bias, so the counter equals RW_LOCK_BIAS exactly
 * when the lock is free, is positive (but below the bias) while only
 * readers hold it, and is <= 0 while a writer holds it. */
typedef union {
        atomic_int lock;
} rwlock_t;
16
17 static inline int read_can_lock(rwlock_t *lock)
18 {
19         return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
20 }
21
22 static inline int write_can_lock(rwlock_t *lock)
23 {
24         return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
25 }
26
27 static inline void read_lock(rwlock_t *rw)
28 {
29         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
30         while (priorvalue <= 0) {
31                 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
32                 do {
33                         priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
34                         if (priorvalue > 0)
35                                 break;
36                         thrd_yield();
37                 } while (true);
38                 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
39         }
40 }
41
42 static inline void write_lock(rwlock_t *rw)
43 {
44         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
45         while (priorvalue != RW_LOCK_BIAS) {
46                 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
47                 do {
48                         priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
49                         if (priorvalue == RW_LOCK_BIAS)
50                                 break;
51                         thrd_yield();
52                 } while (true);
53                 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
54         }
55 }
56
57 static inline int read_trylock(rwlock_t *rw)
58 {
59         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
60         if (priorvalue > 0)
61                 return 1;
62
63         atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
64         return 0;
65 }
66
67 static inline int write_trylock(rwlock_t *rw)
68 {
69         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
70         if (priorvalue == RW_LOCK_BIAS)
71                 return 1;
72
73         atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
74         return 0;
75 }
76
77 static inline void read_unlock(rwlock_t *rw)
78 {
79         atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
80 }
81
82 static inline void write_unlock(rwlock_t *rw)
83 {
84         atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
85 }
86
rwlock_t mylock;        /* shared lock under test; initialized in user_main */
int shareddata;         /* data the lock protects; accessed via load_32/store_32 */
89
90 static void a(void *obj)
91 {
92         int i;
93         for(i = 0; i < 2; i++) {
94                 if ((i % 2) == 0) {
95                         read_lock(&mylock);
96                         load_32(&shareddata);
97                         read_unlock(&mylock);
98                 } else {
99                         write_lock(&mylock);
100                         store_32(&shareddata,(unsigned int)i);
101                         write_unlock(&mylock);
102                 }
103         }
104 }
105
106 int user_main(int argc, char **argv)
107 {
108         thrd_t t1, t2;
109         atomic_init(&mylock.lock, RW_LOCK_BIAS);
110
111         thrd_create(&t1, (thrd_start_t)&a, NULL);
112         thrd_create(&t2, (thrd_start_t)&a, NULL);
113
114         thrd_join(t1);
115         thrd_join(t2);
116
117         return 0;
118 }