/*
- * Copyright 2017 Facebook, Inc.
+ * Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
/* override-include-guard */
#ifndef HAZPTR_H
#error "This should only be included by hazptr.h"
#define HAZPTR_PRIV true
#endif
+#ifndef HAZPTR_PRIV_THRESHOLD
+#define HAZPTR_PRIV_THRESHOLD 20
+#endif
+
#ifndef HAZPTR_ONE_DOMAIN
#define HAZPTR_ONE_DOMAIN false
#endif
#endif
#include <folly/concurrency/CacheLocality.h>
-#include <folly/experimental/AsymmetricMemoryBarrier.h>
#include <folly/experimental/hazptr/debug.h>
+#include <folly/synchronization/AsymmetricMemoryBarrier.h>
#include <mutex> // for thread caching
#include <unordered_set> // for hash set in bulk reclamation
struct hazptr_tc {
hazptr_tc_entry entry_[HAZPTR_TC_SIZE];
size_t count_;
-#ifndef NDEBUG
- bool local_;
-#endif
+ bool local_; // for debug mode only
public:
hazptr_tc_entry& operator[](size_t i);
domain.objRetire(this);
}
+/**
+ * hazptr_obj_base_refcounted
+ */
+
+/* Retire a refcounted object: install the deleter/reclaim function and
+ * hand the object off for deferred reclamation once no hazard pointer
+ * protects it and its reference count has dropped to zero. */
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::retire(
+    hazptr_domain& domain,
+    D deleter) {
+  DEBUG_PRINT(this << " " << &domain);
+  preRetire(deleter);
+  if (HAZPTR_PRIV &&
+      (HAZPTR_ONE_DOMAIN || (&domain == &default_hazptr_domain()))) {
+    /* Fast path: try the thread-private retired list, which batches
+       objects and pushes them to the domain only at a threshold. */
+    if (hazptr_priv_try_retire(this)) {
+      return;
+    }
+  }
+  /* Slow path: retire directly to the domain. */
+  domain.objRetire(this);
+}
+
+/* Atomically increment the reference count (safe under concurrent
+ * access). The previous value must not be negative. */
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::acquire_ref() {
+  DEBUG_PRINT(this);
+  auto oldval = refcount_.fetch_add(1);
+  DCHECK(oldval >= 0);
+}
+
+/* Increment the reference count using a separate load and store rather
+ * than an atomic RMW.
+ * NOTE(review): the load/store pair is not atomic as a whole --
+ * presumably valid only when the caller guarantees no concurrent
+ * updates to refcount_ (the "safe" context); confirm against callers. */
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::acquire_ref_safe() {
+  DEBUG_PRINT(this);
+  auto oldval = refcount_.load(std::memory_order_acquire);
+  DCHECK(oldval >= 0);
+  refcount_.store(oldval + 1, std::memory_order_release);
+}
+
+/* Release one reference. Returns true iff the count was already zero,
+ * i.e. no additional references remain and the object may be
+ * reclaimed by the caller. In debug builds a release past zero stores
+ * -1 so the DCHECK below catches over-release. */
+template <typename T, typename D>
+inline bool hazptr_obj_base_refcounted<T, D>::release_ref() {
+  DEBUG_PRINT(this);
+  auto oldval = refcount_.load(std::memory_order_acquire);
+  if (oldval > 0) {
+    oldval = refcount_.fetch_sub(1);
+  } else {
+    if (kIsDebug) {
+      /* Poison the count so a double-release trips the DCHECK. */
+      refcount_.store(-1);
+    }
+  }
+  DEBUG_PRINT(this << " " << oldval);
+  DCHECK(oldval >= 0);
+  return oldval == 0;
+}
+
+/* Prepare this object for retirement: record the deleter and install
+ * the reclaim function. Reclamation drops one reference and runs the
+ * deleter only when that was the last reference (release_ref()
+ * returned true). The object must not already be linked (next_ null). */
+template <typename T, typename D>
+inline void hazptr_obj_base_refcounted<T, D>::preRetire(D deleter) {
+  DCHECK(next_ == nullptr);
+  deleter_ = std::move(deleter);
+  reclaim_ = [](hazptr_obj* p) {
+    auto hrobp = static_cast<hazptr_obj_base_refcounted*>(p);
+    if (hrobp->release_ref()) {
+      auto obj = static_cast<T*>(hrobp);
+      hrobp->deleter_(obj);
+    }
+  };
+}
+
/**
* hazptr_rec
*/
-class hazptr_rec {
+class alignas(hardware_destructive_interference_size) hazptr_rec {
friend class hazptr_domain;
friend class hazptr_holder;
friend struct hazptr_tc_entry;
- FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
std::atomic<const void*> hazptr_{nullptr};
hazptr_rec* next_{nullptr};
std::atomic<bool> active_{false};
hazptr_array&& other) noexcept {
DEBUG_PRINT(this << " " << M << " " << &other);
auto h = reinterpret_cast<hazptr_holder*>(&raw_);
+ auto hother = reinterpret_cast<hazptr_holder*>(&other.raw_);
for (size_t i = 0; i < M; ++i) {
- new (&h[i]) hazptr_holder(std::move(other.h_[i]));
- DEBUG_PRINT(i << " " << &h[i] << " " << &other.h_[i]);
+ new (&h[i]) hazptr_holder(std::move(hother[i]));
+ DEBUG_PRINT(i << " " << &h[i] << " " << &hother[i]);
}
empty_ = other.empty_;
other.empty_ = true;
auto& tc = *ptc;
auto count = tc.count();
if (M <= count) {
-#ifndef NDEBUG
- DCHECK(!tc.local_);
- tc.local_ = true;
-#endif
+ if (kIsDebug) {
+ DCHECK(!tc.local_);
+ tc.local_ = true;
+ }
// Fast path
for (size_t i = 0; i < M; ++i) {
auto hprec = tc[i].hprec_;
template <size_t M>
FOLLY_ALWAYS_INLINE hazptr_local<M>::~hazptr_local() {
if (LIKELY(!need_destruct_)) {
-#ifndef NDEBUG
- auto ptc = hazptr_tc_tls();
- DCHECK(ptc != nullptr);
- auto& tc = *ptc;
- DCHECK(tc.local_);
- tc.local_ = false;
-#endif
+ if (kIsDebug) {
+ auto ptc = hazptr_tc_tls();
+ DCHECK(ptc != nullptr);
+ auto& tc = *ptc;
+ DCHECK(tc.local_);
+ tc.local_ = false;
+ }
return;
}
// Slow path
return default_domain_;
}
+/* Convenience wrapper: retire obj to the default hazptr domain with a
+ * caller-supplied reclaim function. */
+template <typename T, typename D>
+FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
+  default_hazptr_domain().retire(obj, std::move(reclaim));
+}
+
/** hazptr_rec */
FOLLY_ALWAYS_INLINE void hazptr_rec::set(const void* p) noexcept {
/** hazptr_domain */
+/* Retire an object of arbitrary type T that does not derive from
+ * hazptr_obj: wrap it in a heap-allocated node that owns it through
+ * unique_ptr<T, D>, so destroying the node runs the supplied reclaim
+ * function on the object. */
+template <typename T, typename D>
+void hazptr_domain::retire(T* obj, D reclaim) {
+  struct hazptr_retire_node : hazptr_obj {
+    std::unique_ptr<T, D> obj_; // owns the retired object; D reclaims it
+
+    hazptr_retire_node(T* obj, D reclaim) : obj_{obj, std::move(reclaim)} {}
+  };
+
+  auto node = new hazptr_retire_node(obj, std::move(reclaim));
+  /* Reclaiming the node deletes it, which releases obj_ via D. */
+  node->reclaim_ = [](hazptr_obj* p) {
+    delete static_cast<hazptr_retire_node*>(p);
+  };
+  objRetire(node);
+}
+
inline hazptr_domain::~hazptr_domain() {
DEBUG_PRINT(this);
{ /* reclaim all remaining retired objects */
while (retired) {
for (auto p = retired; p; p = next) {
next = p->next_;
+ DCHECK(p != next);
+ DEBUG_PRINT(this << " " << p << " " << p->reclaim_);
(*(p->reclaim_))(p);
}
retired = retired_.exchange(nullptr);
hazptr_obj* next;
for (; p; p = next) {
next = p->next_;
+ DCHECK(p != next);
if (hs.count(p->getObjPtr()) == 0) {
DEBUG_PRINT(this << " " << p << " " << p->reclaim_);
(*(p->reclaim_))(p);
auto& tc = tls_tc_data_;
DEBUG_PRINT(&tc);
tc.count_ = 0;
-#ifndef NDEBUG
- tc.local_ = false;
-#endif
+ if (kIsDebug) {
+ tc.local_ = false;
+ }
}
inline void hazptr_tc_shutdown() {
head_ = obj;
}
tail_ = obj;
- ++rcount_;
- if (domain.reachedThreshold(rcount_)) {
+ if (++rcount_ >= HAZPTR_PRIV_THRESHOLD) {
pushAllToDomain();
}
}
tls_state_ = TLS_DESTROYED;
}
-} // namespace folly
+/** hazptr_obj_batch */
+/* Only for the default domain. Supports only hazptr_obj_base_refcounted
+ * and thread-safe access only, for now. */
+
+class hazptr_obj_batch {
+  static constexpr size_t DefaultThreshold = 20;
+  hazptr_obj* head_{nullptr}; // most recently added object (list head)
+  hazptr_obj* tail_{nullptr}; // oldest object (list tail)
+  size_t rcount_{0}; // number of objects currently held
+  size_t threshold_{DefaultThreshold}; // batch size that triggers hand-off
+
+ public:
+  /* Construct an empty batch. */
+  hazptr_obj_batch() {}
+  /* Take ownership of an existing list of rcount retired objects. */
+  hazptr_obj_batch(hazptr_obj* head, hazptr_obj* tail, size_t rcount)
+      : head_(head), tail_(tail), rcount_(rcount) {}
+
+  /* Any objects still held are pushed to the default domain. */
+  ~hazptr_obj_batch() {
+    retire_all();
+  }
+
+  /* Prepare a hazptr_obj_base_refcounted for retirement but don't
+     push it to the domain yet. Returns an empty batch while the
+     threshold has not been reached; otherwise returns a batch holding
+     the accumulated objects and resets this one -- the returned
+     batch retires its contents when it is destroyed. */
+  template <typename T, typename D = std::default_delete<T>>
+  hazptr_obj_batch prep_retire_refcounted(
+      hazptr_obj_base_refcounted<T, D>* obj,
+      D deleter = {}) {
+    obj->preRetire(deleter);
+    /* Link at the head; remember the tail for the first element. */
+    obj->next_ = head_;
+    head_ = obj;
+    if (tail_ == nullptr) {
+      tail_ = obj;
+    }
+    if (++rcount_ < threshold_) {
+      return hazptr_obj_batch();
+    } else {
+      auto head = head_;
+      auto tail = tail_;
+      auto rcount = rcount_;
+      clear();
+      return hazptr_obj_batch(head, tail, rcount);
+    }
+  }
+
+  /* True iff the batch holds no objects. */
+  bool empty() {
+    return rcount_ == 0;
+  }
+
+  /* Push all held objects to the default domain, attempt bulk
+     reclamation, and leave the batch empty. */
+  void retire_all() {
+    if (!empty()) {
+      auto& domain = default_hazptr_domain();
+      domain.pushRetired(head_, tail_, rcount_);
+      domain.tryBulkReclaim();
+      clear();
+    }
+  }
+
+  /* Override the default hand-off threshold. */
+  void set_threshold(size_t thresh) {
+    threshold_ = thresh;
+  }
+
+ private:
+  /* Reset to the empty state without retiring anything. */
+  void clear() {
+    head_ = nullptr;
+    tail_ = nullptr;
+    rcount_ = 0;
+  }
+};
+
} // namespace hazptr
+} // namespace folly