#error "This should only be included by hazptr.h"
#endif
+/* quality of implementation switches */
+
+// NOTE: The #ifndef pattern is prone to ODR violation. Its use for
+// quality of implementation options is temporary. Eventually these
+// options should be added to the API in future API extensions.
+
+#ifndef HAZPTR_AMB
+#define HAZPTR_AMB true
+#endif
+
+#ifndef HAZPTR_SCAN_MULT
+#define HAZPTR_SCAN_MULT 2
+#endif
+
+#ifndef HAZPTR_SCAN_THRESHOLD
+#define HAZPTR_SCAN_THRESHOLD 1000
+#endif
+
+/* stats switch */
+#ifndef HAZPTR_STATS
+#define HAZPTR_STATS false
+#endif
+
+#include <folly/experimental/AsymmetricMemoryBarrier.h>
#include <folly/experimental/hazptr/debug.h>
#include <unordered_set>
namespace folly {
namespace hazptr {
+/**
+ * helper classes and functions
+ */
+
+/** hazptr_stats */
+
+class hazptr_stats;
+hazptr_stats& hazptr_stats();
+
+/** hazptr_mb */
+
+// hazptr_mb: issues the fence needed between publishing a hazard pointer
+// (or pushing retired objects) and the matching validation/scan on the
+// other side.  light() is the frequent fast-path fence; heavy() is the
+// rare reclaimer-side fence.  See the definitions below: with HAZPTR_AMB
+// they map to folly's asymmetric barriers, otherwise to seq_cst fences.
+class hazptr_mb {
+ public:
+  static void light();
+  static void heavy();
+};
+
+/**
+ * public functions
+ */
+
/** hazptr_domain */
-constexpr hazptr_domain::hazptr_domain(memory_resource* mr) noexcept
-    : mr_(mr) {}
+constexpr hazptr_domain::hazptr_domain(memory_resource* mr) noexcept : mr_(mr) {
+  // Touch the stats singleton so it is constructed no later than the first
+  // domain -- presumably so it outlives domains that report into it.
+  // NOTE(review): calling the non-constexpr hazptr_stats() makes this
+  // constexpr constructor unusable in constant expressions -- confirm this
+  // is intended and accepted by all supported toolchains.
+  hazptr_stats();
+}
/** hazptr_obj_base */
"Return type of A::load() must be T*");
DEBUG_PRINT(this << " " << ptr << " " << &src);
set(ptr);
- T* p = src.load();
+ /*** Full fence ***/ hazptr_mb::light();
+ T* p = src.load(std::memory_order_acquire);
if (p != ptr) {
ptr = p;
clear();
static_assert(
std::is_same<decltype(std::declval<A>().load()), T*>::value,
"Return type of A::load() must be T*");
- T* p = src.load();
+ T* p = src.load(std::memory_order_relaxed);
while (!try_protect(p, src)) {}
DEBUG_PRINT(this << " " << p << " " << &src);
return p;
lhs.swap(rhs);
}
-////////////////////////////////////////////////////////////////////////////////
-// Non-template part of implementation
////////////////////////////////////////////////////////////////////////////////
// [TODO]:
// - Thread caching of hazptr_rec-s
// - Private storage of retired objects
// - Control of reclamation (when and by whom)
-// - Optimized memory order
/** Definition of default_hazptr_domain() */
inline hazptr_domain& default_hazptr_domain() {
inline void hazptr_rec::set(const void* p) noexcept {
  DEBUG_PRINT(this << " " << p);
-  hazptr_.store(p);
+  // Release store publishes the protection.  The store-load ordering
+  // against the subsequent validation load is provided separately by
+  // hazptr_mb::light() in try_protect(), not by this store.
+  hazptr_.store(p, std::memory_order_release);
}
inline const void* hazptr_rec::get() const noexcept {
-  DEBUG_PRINT(this << " " << hazptr_.load());
-  return hazptr_.load();
+  // Single acquire load; the old code also performed a second (seq_cst)
+  // load purely for the DEBUG_PRINT.
+  auto p = hazptr_.load(std::memory_order_acquire);
+  DEBUG_PRINT(this << " " << p);
+  return p;
}
inline void hazptr_rec::clear() noexcept {
  DEBUG_PRINT(this);
-  hazptr_.store(nullptr);
+  // Release: accesses to the previously protected object cannot be
+  // reordered after the protection is withdrawn.
+  hazptr_.store(nullptr, std::memory_order_release);
}
inline void hazptr_rec::release() noexcept {
  DEBUG_PRINT(this);
  clear();
-  active_.store(false);
+  // Release pairs with the acquire load of active_ in hazptrAcquire(),
+  // which rescues inactive records for reuse.
+  active_.store(false, std::memory_order_release);
}
/** hazptr_obj */
}
{ /* free all hazptr_rec-s */
hazptr_rec* next;
- for (auto p = hazptrs_.load(); p; p = next) {
+ for (auto p = hazptrs_.load(std::memory_order_acquire); p; p = next) {
next = p->next_;
mr_->deallocate(static_cast<void*>(p), sizeof(hazptr_rec));
}
}
}
-inline void hazptr_domain::try_reclaim() {
- DEBUG_PRINT(this);
- rcount_.exchange(0);
- bulkReclaim();
-}
-
inline hazptr_rec* hazptr_domain::hazptrAcquire() {
  hazptr_rec* p;
  hazptr_rec* next;
+  // First pass: try to rescue an inactive record already on the list.
-  for (p = hazptrs_.load(); p; p = next) {
+  for (p = hazptrs_.load(std::memory_order_acquire); p; p = next) {
    next = p->next_;
-    bool active = p->active_.load();
+    bool active = p->active_.load(std::memory_order_acquire);
    if (!active) {
-      if (p->active_.compare_exchange_weak(active, true)) {
+      // Weak CAS is fine: on spurious failure we simply move on to the
+      // next record.  Orders as written: release on success, relaxed on
+      // failure (the preceding acquire load already synchronized).
+      if (p->active_.compare_exchange_weak(
+          active,
+          true,
+          std::memory_order_release,
+          std::memory_order_relaxed)) {
        DEBUG_PRINT(this << " " << p);
        return p;
      }
  if (p == nullptr) {
    return nullptr;
  }
+  // Fresh record (allocation elided from this hunk): it is not yet
+  // visible to other threads, so a relaxed store suffices; the CAS below
+  // publishes it with release.
-  p->active_.store(true);
+  p->active_.store(true, std::memory_order_relaxed);
+  p->next_ = hazptrs_.load(std::memory_order_acquire);
  do {
-    p->next_ = hazptrs_.load();
-    if (hazptrs_.compare_exchange_weak(p->next_, p)) {
+    // CAS failure (acquire) refreshes p->next_ with the current head, so
+    // the explicit reload inside the loop is no longer needed.
+    if (hazptrs_.compare_exchange_weak(
+        p->next_,
+        p,
+        std::memory_order_release,
+        std::memory_order_acquire)) {
      break;
    }
  } while (true);
inline int
hazptr_domain::pushRetired(hazptr_obj* head, hazptr_obj* tail, int count) {
-  tail->next_ = retired_.load();
-  while (!retired_.compare_exchange_weak(tail->next_, head)) {}
+  // Light barrier before publication on the retired list; pairs with
+  // hazptr_mb::heavy() in bulkReclaim().
+  /*** Full fence ***/ hazptr_mb::light();
+  tail->next_ = retired_.load(std::memory_order_acquire);
+  while (!retired_.compare_exchange_weak(
+      tail->next_,
+      head,
+      std::memory_order_release,
+      std::memory_order_acquire)) {
+  }
+  // Returns the retired count *before* this push; objRetire() adds 1.
  return rcount_.fetch_add(count);
}
inline void hazptr_domain::objRetire(hazptr_obj* p) {
  auto rcount = pushRetired(p, p, 1) + 1;
-  if (rcount >= kScanThreshold * hcount_.load()) {
+  // Scan trigger: require both an absolute floor (HAZPTR_SCAN_THRESHOLD)
+  // and proportionality to the number of hazptr_rec-s (HAZPTR_SCAN_MULT).
+  // This mirrors the inverse test in tryBulkReclaim().
+  if (rcount >= HAZPTR_SCAN_THRESHOLD &&
+      rcount >= HAZPTR_SCAN_MULT * hcount_.load(std::memory_order_acquire)) {
    tryBulkReclaim();
  }
}
inline void hazptr_domain::tryBulkReclaim() {
  DEBUG_PRINT(this);
  do {
-    auto hcount = hcount_.load();
-    auto rcount = rcount_.load();
-    if (rcount < kScanThreshold * hcount) {
+    auto hcount = hcount_.load(std::memory_order_acquire);
+    auto rcount = rcount_.load(std::memory_order_acquire);
+    // Inverse of the objRetire() trigger: bail out if the retired count
+    // no longer justifies a scan (another thread may have reclaimed).
+    if (rcount < HAZPTR_SCAN_THRESHOLD || rcount < HAZPTR_SCAN_MULT * hcount) {
      return;
    }
-    if (rcount_.compare_exchange_weak(rcount, 0)) {
+    // Winning this CAS claims the rcount budget; on failure, re-check
+    // the (refreshed) counts before retrying.
+    if (rcount_.compare_exchange_weak(
+        rcount, 0, std::memory_order_release, std::memory_order_relaxed)) {
      break;
    }
  } while (true);
inline void hazptr_domain::bulkReclaim() {
  DEBUG_PRINT(this);
-  auto p = retired_.exchange(nullptr);
-  auto h = hazptrs_.load();
-  std::unordered_set<const void*> hs;
+  // Heavy barrier: make all readers' earlier hazard-pointer stores visible
+  // before the snapshot below.  Pairs with hazptr_mb::light() on the
+  // protect/retire side.
+  /*** Full fence ***/ hazptr_mb::heavy();
+  auto p = retired_.exchange(nullptr, std::memory_order_acquire);
+  auto h = hazptrs_.load(std::memory_order_acquire);
+  std::unordered_set<const void*> hs; // TODO lock-free alternative
  for (; h; h = h->next_) {
-    hs.insert(h->hazptr_.load());
+    // Relaxed load -- presumably sufficient given the heavy barrier
+    // above; confirm against the hazptr protocol.
+    hs.insert(h->hazptr_.load(std::memory_order_relaxed));
  }
  int rcount = 0;
  hazptr_obj* retired = nullptr;
}
}
+/** hazptr_stats */
+
+// hazptr_stats: counters of the barrier types issued by hazptr_mb.
+// Counting is compiled out unless HAZPTR_STATS is true; totals are
+// reported via DEBUG_PRINT from the destructor of the function-local
+// singleton returned by hazptr_stats() below.
+class hazptr_stats {
+ public:
+  ~hazptr_stats();
+  void light();
+  void heavy();
+  void seq_cst();
+
+ private:
+  // NOTE(review): std::atomic and uint64_t are used here but <atomic> and
+  // <cstdint> are not included directly by this hunk -- confirm they
+  // arrive transitively (e.g. via debug.h).
+  std::atomic<uint64_t> light_{0};
+  std::atomic<uint64_t> heavy_{0};
+  std::atomic<uint64_t> seq_cst_{0};
+};
+
+// Report the accumulated barrier counts when the singleton is destroyed
+// (static destruction at program exit).
+inline hazptr_stats::~hazptr_stats() {
+  DEBUG_PRINT(this << " light " << light_.load());
+  DEBUG_PRINT(this << " heavy " << heavy_.load());
+  DEBUG_PRINT(this << " seq_cst " << seq_cst_.load());
+}
+
+// Count one light barrier.  HAZPTR_STATS is a compile-time constant, so
+// the atomic increment is eliminated entirely when stats are disabled.
+inline void hazptr_stats::light() {
+  if (HAZPTR_STATS) {
+    /* atomic */ ++light_;
+  }
+}
+
+// Count one heavy barrier (no-op unless HAZPTR_STATS is true).
+inline void hazptr_stats::heavy() {
+  if (HAZPTR_STATS) {
+    /* atomic */ ++heavy_;
+  }
+}
+
+// Count one seq_cst fallback fence, used when HAZPTR_AMB is false
+// (no-op unless HAZPTR_STATS is true).
+inline void hazptr_stats::seq_cst() {
+  if (HAZPTR_STATS) {
+    /* atomic */ ++seq_cst_;
+  }
+}
+
+// Meyers-singleton accessor for the stats object.  The elaborated type
+// specifier ("class hazptr_stats") is required because this function
+// shadows the class name.  The local static's initialization is
+// thread-safe per C++11 magic statics.
+inline class hazptr_stats& hazptr_stats() {
+  static class hazptr_stats stats_;
+  DEBUG_PRINT(&stats_);
+  return stats_;
+}
+
+/** hazptr_mb */
+
+// Fast-path fence: an asymmetric light barrier when HAZPTR_AMB is
+// enabled, otherwise a full seq_cst fence.  Pairs with heavy().
+inline void hazptr_mb::light() {
+  DEBUG_PRINT("");
+  if (HAZPTR_AMB) {
+    folly::asymmetricLightBarrier();
+    hazptr_stats().light();
+  } else {
+    atomic_thread_fence(std::memory_order_seq_cst);
+    hazptr_stats().seq_cst();
+  }
+}
+
+// Slow-path fence for the reclaimer: an asymmetric heavy barrier when
+// HAZPTR_AMB is enabled (forces ordering against concurrent light()
+// callers), otherwise a full seq_cst fence.
+inline void hazptr_mb::heavy() {
+  DEBUG_PRINT("");
+  if (HAZPTR_AMB) {
+    folly::asymmetricHeavyBarrier();
+    hazptr_stats().heavy();
+  } else {
+    atomic_thread_fence(std::memory_order_seq_cst);
+    hazptr_stats().seq_cst();
+  }
+}
+
} // namespace hazptr
} // namespace folly