// mcs-lock.h — MCS queue lock benchmark (cdsspec-compiler: benchmark/mcs-lock)
1 // mcs on stack
2
3 #include <stdatomic.h>
4 #include <unrelacy.h>
5
6
7
8
struct mcs_node {
	// Link to the next waiter in the queue.  NULL while this node is the
	// tail; the *next* locker writes it, our unlock() reads it.
	std::atomic<mcs_node *> next;
	// Spin flag: 1 while the owning thread must wait; the previous lock
	// holder clears it to 0 to hand the lock over.
	std::atomic<int> gate;

	// A freshly constructed node is not waiting and links to no one.
	mcs_node() {
		gate.store(0);
		next.store(NULL);
	}
};
18
19
// MCS (Mellor-Crummey & Scott) queue mutex.  Waiters form a linked list of
// stack-allocated mcs_node's; each thread spins only on its own node's gate,
// and the unlocker hands the lock to its successor by clearing that gate.
// The /** @Begin ... @End */ blocks are CDSSpec annotations consumed by the
// cdsspec-compiler — they are part of the benchmark's specification and must
// not be edited as ordinary comments.
// NOTE(review): std::mo_* memory-order shorthands, rl::linear_backoff,
// thrd_yield and ASSERT presumably come from unrelacy.h — confirm there.
struct mcs_mutex {
public:
	// tail is null when lock is not held
	// Otherwise it points at the node of the most recent thread to request
	// the lock (the end of the waiter queue).
	std::atomic<mcs_node *> m_tail;

	mcs_mutex() {
		/**
			@Begin
		@Entry_point
			@End
		*/
		m_tail.store( NULL );
	}
	~mcs_mutex() {
		// Destroying a held (or contended) mutex is a usage error.
		ASSERT( m_tail.load() == NULL );
	}

	// Each thread will have their own guard.
	// RAII scope guard: construction acquires the lock, destruction releases
	// it.  The queue node lives inside the guard, i.e. on the caller's stack.
	class guard {
	public:
		mcs_mutex * m_t;    // the mutex this guard locks
		mcs_node    m_node; // node held on the stack

		guard(mcs_mutex * t) : m_t(t) { t->lock(this); }
		~guard() { m_t->unlock(this); }
	};

	/**
		@Begin
		@Options:
			LANG = CPP;
			CLASS = mcs_mutex;
		@Global_define:
			@DeclareVar:
				bool _lock_acquired;
			@InitVar:
				_lock_acquired = false;
		@Happens_before:
			Unlock -> Lock
		@End
	*/

	/**
		@Begin
		@Interface: Lock
		@Commit_point_set: Lock_Enqueue_Point1 | Lock_Enqueue_Point2
		@Check:
			_lock_acquired == false;
		@Action:
			_lock_acquired = true;
		@End
	*/
	// Acquire the mutex, enqueueing I->m_node.  Returns with the lock held:
	// either immediately (queue was empty) or after spinning on our gate
	// until the predecessor clears it.
	void lock(guard * I) {
		mcs_node * me = &(I->m_node);

		// set up my node :
		// not published yet so relaxed :
		// gate = 1 means "I must wait"; it stays 1 until a predecessor
		// hands us the lock.
		me->next.store(NULL, std::mo_relaxed );
		me->gate.store(1, std::mo_relaxed );

		// publish my node as the new tail :
		// acq_rel: release our node's initialization, acquire the
		// predecessor's published state.
		mcs_node * pred = m_tail.exchange(me, std::mo_acq_rel);
		/**
			@Begin
			@Commit_point_define_check: pred == NULL
			@Label: Lock_Enqueue_Point1
			@End
		*/
		// pred == NULL: queue was empty, we own the lock with no spinning.
		if ( pred != NULL ) {
			// (*1) race here
			// unlock of pred can see me in the tail before I fill next

			// publish me to previous lock-holder :
			pred->next.store(me, std::mo_release );

			// (*2) pred not touched any more	
			// (pred may release the lock and its stack node may die as
			// soon as the store above is visible)

			// now this is the spin -
			// wait on predecessor setting my flag -
			rl::linear_backoff bo;
			int my_gate = 1;
			while (my_gate ) {
				// acquire pairs with the release store of 0 in unlock().
				my_gate = me->gate.load(std::mo_acquire);
				/**
					@Begin
					@Commit_point_define_check: my_gate == 0
					@Label: Lock_Enqueue_Point2
					@End
				*/
				thrd_yield();
			}
		}
	}

	/**
		@Begin
		@Interface: Unlock
		@Commit_point_set:
			Unlock_Point_Success_1 | Unlock_Point_Success_2
		@Check:
			_lock_acquired == true
		@Action:
			_lock_acquired = false;
		@End
	*/
	// Release the mutex.  If we are the only queued node, swing m_tail back
	// to NULL; otherwise wait for our successor to link itself in (closing
	// the (*1) race in lock()) and open its gate.
	void unlock(guard * I) {
		mcs_node * me = &(I->m_node);

		mcs_node * next = me->next.load(std::mo_acquire);
		if ( next == NULL )
		{
			// No successor visible yet.  Try to detach: if tail is still
			// us, the queue becomes empty and the lock is free.
			mcs_node * tail_was_me = me;
			bool success;
			success = m_tail.compare_exchange_strong(
				tail_was_me,NULL,std::mo_acq_rel);
			/**
				@Begin
				@Commit_point_define_check: success == true
				@Label: Unlock_Point_Success_1
				@End
			*/
			if (success) {
				
				// got null in tail, mutex is unlocked
				return;
			}

			// (*1) catch the race :
			// CAS failed, so some thread exchanged itself into the tail
			// but has not yet stored into our 'next' — spin until it does.
			rl::linear_backoff bo;
			for(;;) {
				next = me->next.load(std::mo_acquire);
				if ( next != NULL )
					break;
				thrd_yield();
			}
		}

		// (*2) - store to next must be done,
		//  so no locker can be viewing my node any more	

		// let next guy in :
		// release pairs with the acquire gate-load in the successor's spin.
		next->gate.store( 0, std::mo_release );
		/**
			@Begin
			@Commit_point_define_check: true
			@Label: Unlock_Point_Success_2
			@End
		*/
	}
};
/**
	@Begin
	@Class_end
	@End
*/