--- /dev/null
+// mcs on stack
+
+#include <stdatomic.h>
+#include <unrelacy.h>
+
+
+/**
+ Properties to check:
+ 1. At any point, only one thread can hold the mutex; whenever a thread
+ unlocks the mutex, it must currently be the holder.
+ 2. Lock acquisition order is guaranteed: there is a FIFO queue of all
+ the lock operations.
+ ####
+ 3. Should establish the happens-before relationship between unlock and lock
+*/
+
+/**
+ @Begin
+ @Global_define:
+ # The invariant is that the thread that has a guard at the head of the
+ # queue should be the one who currently has the lock.
+
+ int __lock_acquired = 0;
+ queue<guard*> __lock_waiting_queue;
+
+ @Happens-before:
+ # Unless specified otherwise, this means that the beginning of a successful
+ # Unlock() happens before the end of the next successful Lock().
+ Unlock -> Lock
+ @End
+*/
+
+// Queue node for the MCS lock. One node per locking thread; it lives on
+// the locking thread's stack (inside mcs_mutex::guard), so enqueueing
+// needs no heap allocation.
+struct mcs_node {
+	std::atomic<mcs_node *> next;	// next waiter in the queue; NULL when no successor is linked yet
+	std::atomic<int> gate;		// 1 = owner not ready for us, spin; 0 = lock handed to this node
+
+	mcs_node() {
+		next.store(0);
+		gate.store(0);
+	}
+};
+
+// On-stack MCS queue lock. Contending threads form a singly-linked FIFO
+// queue through their stack-resident mcs_nodes, and each waiter spins
+// only on its own node's gate flag (local spinning). The (*1)/(*2)
+// markers below call out the enqueue/unlock race that unlock() must
+// catch: a new tail can be visible before its ->next link is published.
+struct mcs_mutex {
+public:
+	// tail is null when lock is not held
+	// Otherwise m_tail points at the most recently enqueued node, i.e.
+	// the node a newly arriving thread must link itself behind.
+	std::atomic<mcs_node *> m_tail;
+
+	mcs_mutex() {
+		m_tail.store( NULL );
+	}
+	~mcs_mutex() {
+		// The lock must be free (no holder, no waiters) at destruction.
+		ASSERT( m_tail.load() == NULL );
+	}
+
+	// Each thread will have their own guard.
+	// RAII scope guard: the constructor acquires the mutex, the
+	// destructor releases it. The guard owns the queue node, so the
+	// node's lifetime is exactly the locked region on this stack.
+	class guard {
+	public:
+		mcs_mutex * m_t;	// the mutex this guard locks/unlocks
+		mcs_node m_node; // node held on the stack
+
+		guard(mcs_mutex * t) : m_t(t) { t->lock(this); }
+		~guard() { m_t->unlock(this); }
+	};
+
+
+	/**
+		@Begin
+		# This function will soon enqueue the current guard to the queue to make
+		# sure it will get fair lock successfully.
+		@Interface: Lock
+		@Commit_point_set: Lock_Enqueue_Point
+		@Action:
+			@Code:
+			__lock_waiting_queue.enqueue(I);
+		@Post_action:
+			__lock_acquired++;
+		@Post_check:
+			# Make sure when it successfully locks, the lock is not acquired yet
+			# and the locking is in a FIFO order
+			__lock_acquired == 1 && I == __lock_waiting_queue.peak()
+		@End
+	*/
+	// Acquire the lock, enqueueing I's on-stack node and spinning on its
+	// private gate flag until the predecessor hands the lock over.
+	void lock(guard * I) {
+		mcs_node * me = &(I->m_node);
+
+		// set up my node :
+		// not published yet so relaxed :
+		me->next.store(NULL, std::mo_relaxed );
+		me->gate.store(1, std::mo_relaxed );
+
+		// publish my node as the new tail :
+		// acq_rel: the release half publishes our node initialization to
+		// the next arriver; the acquire half synchronizes with whoever
+		// installed the predecessor.
+		mcs_node * pred = m_tail.exchange(me, std::mo_acq_rel);
+		/**
+			# Once the exchange completes, the thread already claimed the next
+			# available slot for the lock
+			@Begin
+			@Commit_point_check_define: true
+			@Label: Lock_Enqueue_Point
+			@End
+		*/
+		// pred == NULL means the tail was empty: the exchange alone
+		// acquired the lock and there is no one to wait behind.
+		if ( pred != NULL ) {
+			// (*1) race here
+			// unlock of pred can see me in the tail before I fill next
+
+			// publish me to previous lock-holder :
+			pred->next.store(me, std::mo_release );
+
+			// (*2) pred not touched any more
+
+			// now this is the spin -
+			// wait on predecessor setting my flag -
+			// The acquire load pairs with the release store of gate=0 in
+			// unlock(), establishing the unlock -> lock happens-before
+			// edge required by property 3 above.
+			rl::linear_backoff bo;
+			int my_gate = 1;
+			while ( (my_gate = me->gate.load(std::mo_acquire)) ) {
+				thrd_yield();
+			}
+		}
+	}
+
+	/**
+		@Begin
+		@Interface: Unlock
+		@Commit_point_set:
+			Unlock = Unlock_Point_Success_1 | Unlock_Point_Success_2
+		@Check:
+			# NOTE(review): @Global_define declares __lock_acquired and
+			# __lock_waiting_queue (double-underscore prefix); the names used
+			# below lack that prefix -- verify against the spec compiler.
+			lock_acquired == 1 && I == lock_waiting_queue.peak()
+		@Action:
+			@Code:
+			lock_acquired--;
+			lock_waiting_queue.dequeue();
+		@End
+	*/
+	// Release the lock: either clear the tail (no waiter) or open the
+	// successor's gate, handing ownership over in FIFO order.
+	void unlock(guard * I) {
+		mcs_node * me = &(I->m_node);
+
+		// Acquire-load our successor link; it may not be published yet
+		// even if a successor exists -- see race (*1) in lock().
+		mcs_node * next = me->next.load(std::mo_acquire);
+		if ( next == NULL )
+		{
+			// No visible successor. If the tail is still our own node,
+			// nobody enqueued behind us: CAS tail to NULL to free the lock.
+			mcs_node * tail_was_me = me;
+			bool success;
+			if ( (success = m_tail.compare_exchange_strong(
+					tail_was_me,NULL,std::mo_acq_rel)) ) {
+				/**
+					@Begin
+					@Commit_point_check_define: __ATOMIC_RET__ == true
+					@Label: Unlock_Point_Success_1
+					@End
+				*/
+				// got null in tail, mutex is unlocked
+				return;
+			}
+
+			// (*1) catch the race :
+			// CAS failed: a newcomer already swapped itself into m_tail
+			// but has not stored our ->next link yet. Spin until it shows.
+			rl::linear_backoff bo;
+			for(;;) {
+				next = me->next.load(std::mo_acquire);
+				if ( next != NULL )
+					break;
+				thrd_yield();
+			}
+		}
+
+		// (*2) - store to next must be done,
+		// so no locker can be viewing my node any more
+
+		// let next guy in :
+		// Release store pairs with the acquire gate load in lock()'s spin,
+		// transferring ownership with the required happens-before edge.
+		next->gate.store( 0, std::mo_release );
+		/**
+			@Begin
+			@Commit_point_check_define: true
+			@Label: Unlock_Point_Success_2
+			@End
+		*/
+	}
+};