--- /dev/null
+#ifndef _BARRIER_H
+#define _BARRIER_H
+
+#include "backoff.h"
+#include <atomic>
+
+namespace cds_others {
+
+// Reusable spinning barrier: wait() blocks until n_ threads have arrived,
+// then releases all of them together. The barrier resets itself, so the
+// same object can be used for any number of successive rounds.
+class SpinBarrier {
+public:
+  // n is the number of threads that must call wait() before any of them
+  // is released. Must be >= 1.
+  SpinBarrier(unsigned int n) : n_(n) {
+    nwait_ = 0;
+    step_ = 0;
+  }
+
+  // The purpose of wait() is that threads that enter it synchronize with
+  // threads when they get out of it.
+  /** Memory-ordering rationale: the acq_rel fetch_add on nwait_ links every
+   * arriving thread into a single RMW chain, so each arrival happens-before
+   * later arrivals in that chain. The release fetch_add on step_ performed
+   * by the last thread, paired with the acquire loads of step_ in the
+   * spinning threads, makes the last thread synchronize-with all earlier
+   * threads on release. That same release/acquire pairing also guarantees
+   * the relaxed reset of nwait_ happens-before any thread's next use of
+   * the barrier.
+   */
+
+  // Returns true for exactly one caller per round (the last thread to
+  // arrive); returns false for every other caller.
+  bool wait() {
+    // Snapshot the current round number; spinners wait for it to change.
+    // Relaxed is enough: the acquire load in the spin loop below provides
+    // the synchronization.
+    unsigned int step = step_.load(std::memory_order_relaxed);
+
+    // acq_rel RMW: chains all arrivals; the thread that observes n_ - 1
+    // prior arrivals is the last one in this round.
+    if (nwait_.fetch_add(1, std::memory_order_acq_rel) == n_ - 1) {
+      /* OK, last thread to come. */
+      // Relaxed is sufficient: the release increment of step_ below
+      // publishes this reset before any spinner can re-enter the barrier.
+      nwait_.store(0, std::memory_order_relaxed);
+      // Release: pairs with the acquire loads in the spin loop, releasing
+      // all waiters and publishing everything done before the barrier.
+      step_.fetch_add(1, std::memory_order_release);
+      return true;
+    } else {
+      // ExpBackoff (from backoff.h) throttles the spin to reduce
+      // contention on step_.
+      ExpBackoff backoff;
+      /* Run in circles and scream like a little girl. */
+      // Acquire pairs with the release increment above, so all work the
+      // other threads did before the barrier is visible after we leave.
+      while (step_.load(std::memory_order_acquire) == step) {
+        backoff();
+      }
+      return false;
+    }
+  }
+
+protected:
+  /* Number of synchronized threads. */
+  const unsigned int n_;
+
+  /* Number of threads currently spinning. */
+  std::atomic<unsigned int> nwait_;
+
+  /* Number of barrier synchronizations completed so far;
+   * it's OK to wrap. */
+  std::atomic<unsigned int> step_;
+};
+
+} // namespace cds_others
+
+#endif