- struct Recycler : private boost::noncopyable {
- Recycler() : refs_(0), dirty_(false) { lock_.init(); }
-
- ~Recycler() {
- if (nodes_) {
- for (auto& node : *nodes_) {
- NodeType::destroy(node);
- }
- }
- }
-
- void add(NodeType* node) {
- std::lock_guard<MicroSpinLock> g(lock_);
- if (nodes_.get() == nullptr) {
- nodes_.reset(new std::vector<NodeType*>(1, node));
- } else {
- nodes_->push_back(node);
- }
- DCHECK_GT(refs(), 0);
- dirty_.store(true, std::memory_order_relaxed);
- }
-
- int refs() const {
- return refs_.load(std::memory_order_relaxed);
- }
-
- int addRef() {
- return refs_.fetch_add(1, std::memory_order_relaxed);
- }
-
- int release() {
- // We don't expect to clean the recycler immediately everytime it is OK
- // to do so. Here, it is possible that multiple accessors all release at
- // the same time but nobody would clean the recycler here. If this
- // happens, the recycler will usually still get cleaned when
- // such a race doesn't happen. The worst case is the recycler will
- // eventually get deleted along with the skiplist.
- if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
- return refs_.fetch_add(-1, std::memory_order_relaxed);
- }
-
- boost::scoped_ptr<std::vector<NodeType*> > newNodes;
- {
- std::lock_guard<MicroSpinLock> g(lock_);
- if (nodes_.get() == nullptr || refs() > 1) {
- return refs_.fetch_add(-1, std::memory_order_relaxed);
- }
- // once refs_ reaches 1 and there is no other accessor, it is safe to
- // remove all the current nodes in the recycler, as we already acquired
- // the lock here so no more new nodes can be added, even though new
- // accessors may be added after that.
- newNodes.swap(nodes_);
- dirty_.store(false, std::memory_order_relaxed);
- }
-
- // TODO(xliu) should we spawn a thread to do this when there are large
- // number of nodes in the recycler?
- for (auto& node : *newNodes) {
- NodeType::destroy(node);
- }
-
- // decrease the ref count at the very end, to minimize the
- // chance of other threads acquiring lock_ to clear the deleted
- // nodes again.
- return refs_.fetch_add(-1, std::memory_order_relaxed);
- }
-
- private:
- boost::scoped_ptr<std::vector<NodeType*>> nodes_;
- std::atomic<int32_t> refs_; // current number of visitors to the list
- std::atomic<bool> dirty_; // whether *nodes_ is non-empty
- MicroSpinLock lock_; // protects access to *nodes_
- }; // class ConcurrentSkipList::Recycler
-
// Constructs an empty skip list whose head (sentinel) node has the given
// tower height; the element count starts at zero.
// NOTE(review): the `true` flag presumably marks the node as the head
// sentinel — confirm against NodeType::create's signature, which is
// outside this view.
explicit ConcurrentSkipList(int height) :
    head_(NodeType::create(height, value_type(), true)), size_(0) {}
-