--- /dev/null
+#ifndef _MCS_LOCK_H
+#define _MCS_LOCK_H
+
+#include "backoff.h"
+#include <atomic>
+#include <cassert>
+#include <cds/algo/backoff_strategy.h>
+#include <thread>
+
+namespace cds_others {
+
+// Definitions of BackoffTraits' static data members (declared in
+// backoff.h): the exponential-backoff spin count starts at 16 and is
+// capped at 1024 iterations.
+// NOTE(review): these are non-inline definitions in a header, which
+// violates the ODR if this header is included from more than one
+// translation unit -- presumably it is included exactly once; verify.
+size_t BackoffTraits::lower_bound = 16;
+size_t BackoffTraits::upper_bound = 1024;
+
+// Forward declarations.
+struct mcs_node;
+struct mcs_mutex;
+
+// Queue node for the MCS lock. One node lives on each contending
+// thread's stack (inside mcs_mutex::guard) and is linked into the
+// lock's intrusive FIFO waiter queue.
+struct mcs_node {
+  // Next waiter in the queue; filled in by the successor during its
+  // lock() call.
+  std::atomic<mcs_node *> next;
+  // 1 while the owning thread must spin; cleared to 0 by the
+  // predecessor's unlock() to hand the lock over.
+  std::atomic<int> gate;
+
+  mcs_node() {
+    // The node is not visible to other threads during construction, so
+    // the default (seq_cst) stores are harmless here. Use nullptr for
+    // the pointer rather than the integer literal 0.
+    next.store(nullptr);
+    gate.store(0);
+  }
+};
+
+// MCS queue lock: contending threads link stack-resident mcs_node
+// records into a FIFO queue, and each waiter spins only on its own
+// node's `gate` flag, so spin traffic stays local to one node per
+// waiter.
+struct mcs_mutex {
+public:
+  // Queue tail: null when the lock is not held; otherwise it points to
+  // the most recently queued node (the owner's node if nobody waits).
+  std::atomic<mcs_node *> m_tail;
+
+  mcs_mutex() { m_tail.store(nullptr); }
+  // Destroying a held (or contended) mutex is a bug.
+  // NOTE(review): assert() needs <cassert>; presumably pulled in
+  // transitively via backoff.h -- confirm.
+  ~mcs_mutex() { assert(m_tail.load() == nullptr); }
+
+  // RAII scope guard. The queue node lives inside the guard on the
+  // caller's stack, so it stays valid exactly as long as this thread
+  // holds or awaits the lock.
+  class guard {
+  public:
+    mcs_mutex *m_t;
+    mcs_node m_node; // node held on the stack
+
+    // Call the wrapper (instrument every lock/unlock)
+    guard(mcs_mutex *t) : m_t(t) { t->lock(this); }
+    ~guard() { m_t->unlock(this); }
+  };
+
+  // Acquire the lock: enqueue I->m_node as the new tail and, if a
+  // predecessor exists, spin on our own gate until it hands over.
+  void lock(guard *I) {
+    mcs_node *me = &(I->m_node);
+
+    // Set up my node. It is not published yet, so relaxed stores
+    // suffice:
+    me->next.store(nullptr, std::memory_order_relaxed);
+    me->gate.store(1, std::memory_order_relaxed);
+
+    // Publish my node as the new tail. acq_rel: the release half
+    // publishes the node initialization above, the acquire half
+    // synchronizes with the previous tail's publication.
+    mcs_node *pred = m_tail.exchange(me, std::memory_order_acq_rel);
+    if (pred != nullptr) {
+      // (*1) race here:
+      // the unlock() of pred can see me in the tail before I have
+      // filled in pred->next (unlock() spins for it -- see (*1) there).
+
+      // Publish me to the previous lock-holder. The release order
+      // matters: if this store were relaxed, pred's store of 0 to my
+      // gate could effectively be ordered before my store of 1 above,
+      // and this lock() would then spin forever.
+      pred->next.store(me, std::memory_order_release);
+
+      // (*2) pred is not touched any more after this point.
+
+      // Now the spin: wait for the predecessor's unlock() to clear my
+      // gate. The acquire load pairs with the release store in
+      // unlock(), making the previous critical section visible.
+      ExpBackoff backoff;
+      while (me->gate.load(std::memory_order_acquire)) {
+        backoff();
+      }
+    }
+  }
+
+  // Release the lock, handing it to the next queued waiter if any.
+  void unlock(guard *I) {
+    mcs_node *me = &(I->m_node);
+    mcs_node *next = me->next.load(std::memory_order_acquire);
+    if (next == nullptr) {
+      // No known successor: try to swing the tail from me back to null.
+      mcs_node *tail_was_me = me;
+
+      // This was mo_acq_rel, which is stronger than necessary: release
+      // alone publishes our critical section to the next acquirer.
+      if (m_tail.compare_exchange_strong(tail_was_me, nullptr,
+                                         std::memory_order_release)) {
+        // got null in tail, mutex is unlocked
+        return;
+      }
+
+      // (*1) catch the race: the CAS failed, so a successor already
+      // swapped itself into the tail but may not have set me->next yet.
+      // Spin until its pointer appears.
+      ExpBackoff backoff;
+      for (;;) {
+        next = me->next.load(std::memory_order_acquire);
+        if (next != nullptr)
+          break;
+        backoff();
+      }
+    }
+
+    // (*2) - the successor's store to next is done, so no locker can be
+    // viewing my node any more.
+
+    // Hand over the lock: this release pairs with the acquire spin in
+    // lock().
+    next->gate.store(0, std::memory_order_release);
+  }
+};
+
+} // namespace cds_others
+
+#endif