X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2FConcurrentSkipList-inl.h;h=e4144bcbfb57dfaa035ace57a2f163ac62f4dbcf;hp=78be7243186dc7d0147918c545a26c46178047ea;hb=9276f2a5646a94eda765c92b171b98c499313213;hpb=27494a20393fa45072e7d526d358835f3abe312a

diff --git a/folly/ConcurrentSkipList-inl.h b/folly/ConcurrentSkipList-inl.h
index 78be7243..e4144bcb 100644
--- a/folly/ConcurrentSkipList-inl.h
+++ b/folly/ConcurrentSkipList-inl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2016 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,24 +16,31 @@
 
 // @author: Xin Liu
 
-#ifndef FOLLY_CONCURRENTSKIPLIST_INL_H_
-#define FOLLY_CONCURRENTSKIPLIST_INL_H_
+#pragma once
 
 #include
+#include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
 #include
-
+#include
 #include
-#include "folly/SmallLocks.h"
-#include "folly/ThreadLocal.h"
+
+#include
+#include
+#include
 
 namespace folly { namespace detail {
 
 template<typename ValT, typename NodeT> class csl_iterator;
 
 template<typename T>
-class SkipListNode : boost::noncopyable {
+class SkipListNode : private boost::noncopyable {
   enum {
     IS_HEAD_NODE = 1,
     MARKED_FOR_REMOVAL = (1 << 1),
@@ -42,40 +49,36 @@ class SkipListNode : boost::noncopyable {
  public:
   typedef T value_type;
 
-  static SkipListNode* create(int height,
-      const value_type& data, bool isHead = false) {
+  template<typename NodeAlloc, typename U,
+    typename=typename std::enable_if<std::is_convertible<U, T>::value>::type>
+  static SkipListNode* create(
+      NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
     DCHECK(height >= 1 && height < 64) << height;
 
-    size_t size = sizeof(SkipListNode) + height * sizeof(SkipListNode*);
-    auto* node = static_cast<SkipListNode*>(malloc(size));
-    new (node) SkipListNode(height);
-
-    node->spinLock_.init();
-    node->setFlags(0);
-
-    if (isHead) {
-      node->setIsHeadNode();
-    } else {
-      new (&(node->data_)) value_type(data);
-    }
+    size_t size = sizeof(SkipListNode) +
+      height * sizeof(std::atomic<SkipListNode*>);
+    auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
+    // do placement new
+    new (node) SkipListNode(height, std::forward<U>(data), isHead);
     return node;
   }
 
-  static void destroy(SkipListNode* node) {
-    if (!node->isHeadNode()) {
-      node->data_.~value_type();
-    }
+  template<typename NodeAlloc>
+  static void destroy(NodeAlloc& alloc, SkipListNode* node) {
     node->~SkipListNode();
-    free(node);
+    alloc.deallocate(node);
   }
 
-  // assuming lock acquired
-  SkipListNode* promoteFrom(const SkipListNode* node) {
+  template<typename NodeAlloc>
+  static constexpr bool destroyIsNoOp() {
+    return IsArenaAllocator<NodeAlloc>::value &&
+           boost::has_trivial_destructor<T>::value;
+  }
+
+  // copy the head node to a new head node assuming lock acquired
+  SkipListNode* copyHead(SkipListNode* node) {
     DCHECK(node != nullptr && height_ > node->height_);
     setFlags(node->getFlags());
-    if (!isHeadNode()) {
-      new (&(data_)) value_type(node->data());
-    }
     for (int i = 0; i < node->height_; ++i) {
       setSkip(i, node->skip(i));
     }
@@ -125,14 +128,22 @@ class SkipListNode : boost::noncopyable {
   }
 
  private:
-  ~SkipListNode() {
+  // Note! this can only be called from create() as a placement new.
+  template<typename U>
+  SkipListNode(uint8_t height, U&& data, bool isHead) :
+      height_(height), data_(std::forward<U>(data)) {
+    spinLock_.init();
+    setFlags(0);
+    if (isHead) setIsHeadNode();
+    // need to explicitly init the dynamic atomic pointer array
     for (uint8_t i = 0; i < height_; ++i) {
-      skip_[i].~atomic();
+      new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
     }
   }
 
-  explicit SkipListNode(uint8_t height) : height_(height) {
+  ~SkipListNode() {
     for (uint8_t i = 0; i < height_; ++i) {
-      new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
+      skip_[i].~atomic();
     }
   }
 
@@ -182,7 +193,6 @@ class SkipListRandomHeight {
   }
 
  private:
-
   SkipListRandomHeight() { initLookupTable(); }
 
   void initLookupTable() {
@@ -215,6 +225,110 @@ class SkipListRandomHeight {
   size_t sizeLimitTable_[kMaxHeight];
 };
 
-}}
+template<typename NodeType, typename NodeAlloc, typename = void>
+class NodeRecycler;
+
+template<typename NodeType, typename NodeAlloc>
+class NodeRecycler<NodeType, NodeAlloc,
+  typename std::enable_if<!NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
+ public:
+  explicit NodeRecycler(const NodeAlloc& alloc)
+    : refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }
+
+  explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }
+
+  ~NodeRecycler() {
+    CHECK_EQ(refs(), 0);
+    if (nodes_) {
+      for (auto& node : *nodes_) {
+        NodeType::destroy(alloc_, node);
+      }
+    }
+  }
+
+  void add(NodeType* node) {
+    std::lock_guard<MicroSpinLock> g(lock_);
+    if (nodes_.get() == nullptr) {
+      nodes_.reset(new std::vector<NodeType*>(1, node));
+    } else {
+      nodes_->push_back(node);
+    }
+    DCHECK_GT(refs(), 0);
+    dirty_.store(true, std::memory_order_relaxed);
+  }
+
+  int addRef() {
+    return refs_.fetch_add(1, std::memory_order_relaxed);
+  }
+
+  int releaseRef() {
+    // We don't expect to clean the recycler immediately everytime it is OK
+    // to do so. Here, it is possible that multiple accessors all release at
+    // the same time but nobody would clean the recycler here. If this
+    // happens, the recycler will usually still get cleaned when
+    // such a race doesn't happen. The worst case is the recycler will
+    // eventually get deleted along with the skiplist.
+    if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
+      return refs_.fetch_add(-1, std::memory_order_relaxed);
+    }
+
+    std::unique_ptr<std::vector<NodeType*>> newNodes;
+    {
+      std::lock_guard<MicroSpinLock> g(lock_);
+      if (nodes_.get() == nullptr || refs() > 1) {
+        return refs_.fetch_add(-1, std::memory_order_relaxed);
+      }
+      // once refs_ reaches 1 and there is no other accessor, it is safe to
+      // remove all the current nodes in the recycler, as we already acquired
+      // the lock here so no more new nodes can be added, even though new
+      // accessors may be added after that.
+      newNodes.swap(nodes_);
+      dirty_.store(false, std::memory_order_relaxed);
+    }
+
+    // TODO(xliu) should we spawn a thread to do this when there are large
+    // number of nodes in the recycler?
+    for (auto& node : *newNodes) {
+      NodeType::destroy(alloc_, node);
+    }
+
+    // decrease the ref count at the very end, to minimize the
+    // chance of other threads acquiring lock_ to clear the deleted
+    // nodes again.
+    return refs_.fetch_add(-1, std::memory_order_relaxed);
+  }
+
+  NodeAlloc& alloc() { return alloc_; }
+
+ private:
+  int refs() const {
+    return refs_.load(std::memory_order_relaxed);
+  }
+
+  std::unique_ptr<std::vector<NodeType*>> nodes_;
+  std::atomic<int32_t> refs_; // current number of visitors to the list
+  std::atomic<bool> dirty_; // whether *nodes_ is non-empty
+  MicroSpinLock lock_; // protects access to *nodes_
+  NodeAlloc alloc_;
+};
+
+// In case of arena allocator, no recycling is necessary, and it's possible
+// to save on ConcurrentSkipList size.
+template<typename NodeType, typename NodeAlloc>
+class NodeRecycler<NodeType, NodeAlloc,
+  typename std::enable_if<NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
+ public:
+  explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) { }
+
+  void addRef() { }
+  void releaseRef() { }
+
+  void add(NodeType* /* node */) {}
+
+  NodeAlloc& alloc() { return alloc_; }
+
+ private:
+  NodeAlloc alloc_;
+};
 
-#endif  // FOLLY_CONCURRENTSKIPLIST_INL_H_
+}}  // namespaces
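For context, the sketch below shows how the skip list touched by this diff is typically driven from user code. It is a minimal usage example based on the public API documented in folly/ConcurrentSkipList.h (createInstance, Accessor, insert/erase), not part of this commit, and it uses the default node allocator, so the recycling NodeRecycler specialization above (rather than the no-op arena one) is the one exercised. The behavioral comments reflect my reading of the diff and may simplify details.

```cpp
#include <memory>

#include <folly/ConcurrentSkipList.h>

int main() {
  // int elements, std::less<int> ordering, default node allocator.
  typedef folly::ConcurrentSkipList<int> SkipListT;

  // createInstance() returns a shared_ptr; the argument is the initial
  // height of the head node.
  std::shared_ptr<SkipListT> list(SkipListT::createInstance(10));

  {
    // All reads and writes go through an Accessor. Constructing one bumps
    // the recycler's ref count (addRef) and destroying it releases it
    // (releaseRef), which is what eventually lets removed nodes be freed.
    SkipListT::Accessor accessor(list);
    accessor.insert(23);
    accessor.insert(42);
    accessor.erase(23);       // unlinked node goes to the recycler

    for (int v : accessor) {  // iterate remaining elements in sorted order
      (void)v;
    }
  }  // last accessor released: recycled nodes may now be destroyed

  return 0;
}
```

Holding the accessor only for the duration of the operations, as above, keeps the recycler's reference count low so that nodes queued by erase() can actually be reclaimed between bursts of activity.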