/*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
// @author: Xin Liu <xliux@fb.com>
-#ifndef FOLLY_CONCURRENTSKIPLIST_INL_H_
-#define FOLLY_CONCURRENTSKIPLIST_INL_H_
+#pragma once
#include <algorithm>
#include <atomic>
#include <boost/type_traits.hpp>
#include <glog/logging.h>
-#include "folly/Memory.h"
-#include "folly/SmallLocks.h"
-#include "folly/ThreadLocal.h"
+#include <folly/Memory.h>
+#include <folly/MicroSpinLock.h>
+#include <folly/ThreadLocal.h>
namespace folly { namespace detail {
// True when destroying skip-list nodes can be skipped entirely:
// the allocator is an arena (its memory is reclaimed in bulk, so no
// per-node deallocation is needed) AND SkipListNode itself has a
// trivial destructor (so no destructor call is needed either).
// NOTE(review): resolved leftover diff markers here — the old revision
// tested std::atomic<SkipListNode*>'s destructor; the corrected form
// tests the whole SkipListNode, per the '+' side of the patch.
template<typename NodeAlloc>
static constexpr bool destroyIsNoOp() {
  return IsArenaAllocator<NodeAlloc>::value &&
    boost::has_trivial_destructor<SkipListNode>::value;
}
// copy the head node to a new head node assuming lock acquired
!NodeType::template destroyIsNoOp<NodeAlloc>()>::type> {
public:
explicit NodeRecycler(const NodeAlloc& alloc)
- : alloc_(alloc), refs_(0), dirty_(false) { lock_.init(); }
+ : refs_(0), dirty_(false), alloc_(alloc) { lock_.init(); }
+
+ explicit NodeRecycler() : refs_(0), dirty_(false) { lock_.init(); }
~NodeRecycler() {
CHECK_EQ(refs(), 0);
return refs_.load(std::memory_order_relaxed);
}
- NodeAlloc alloc_;
std::unique_ptr<std::vector<NodeType*>> nodes_;
std::atomic<int32_t> refs_; // current number of visitors to the list
std::atomic<bool> dirty_; // whether *nodes_ is non-empty
MicroSpinLock lock_; // protects access to *nodes_
+ NodeAlloc alloc_;
};
// In case of arena allocator, no recycling is necessary, and it's possible
void addRef() { }
void releaseRef() { }
- void add(NodeType* node) { }
+ void add(NodeType* /* node */) {}
NodeAlloc& alloc() { return alloc_; }
};
}} // namespaces
-
-#endif // FOLLY_CONCURRENTSKIPLIST_INL_H_