/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * AtomicHashArray is the building block for AtomicHashMap.  It provides the
 * core lock-free functionality, but is limited by the fact that it cannot
 * grow past its initialization size and is a little more awkward (no public
 * constructor, for example).  If you're confident that you won't run out of
 * space, don't mind the awkwardness, and really need bare-metal performance,
 * feel free to use AHA directly.
 *
 * Check out AtomicHashMap.h for more thorough documentation on perf and
 * general pros and cons relative to other hash maps.
 *
 * @author Spencer Ahrens <sahrens@fb.com>
 * @author Jordan DeLong <delong.j@fb.com>
 */
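/*
 * Example usage (an editor's illustrative sketch, not part of the original
 * header; assumes integer keys and the default Config):
 *
 *   typedef folly::AtomicHashArray<int64_t, int64_t> AHA;
 *   AHA::SmartPtr aha = AHA::create(4096);
 *   auto ret = aha->insert(std::make_pair(1, 100)); // ret.second == success
 *   AHA::iterator it = aha->find(1);                // == end() if absent
 *   if (it != aha->end()) {
 *     ++it->second;  // values may be mutated in place; keys may not
 *   }
 */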
#ifndef FOLLY_ATOMICHASHARRAY_H_
#define FOLLY_ATOMICHASHARRAY_H_
#include <atomic>

#include <boost/iterator/iterator_facade.hpp>
#include <boost/noncopyable.hpp>

#include <folly/Hash.h>
#include <folly/ThreadCachedInt.h>

namespace folly {
struct AtomicHashArrayLinearProbeFcn {
  inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const {
    idx += 1; // linear probing

    // Avoid modulus because it's slow
    return LIKELY(idx < capacity) ? idx : (idx - capacity);
  }
};
struct AtomicHashArrayQuadraticProbeFcn {
  inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const {
    idx += numProbes; // quadratic probing

    // Avoid modulus because it's slow
    return LIKELY(idx < capacity) ? idx : (idx - capacity);
  }
};
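// Example (an editor's illustrative sketch): the probe functor is selected
// through the ProbeFcn template parameter of AtomicHashArray, e.g.:
//
//   typedef folly::AtomicHashArray<int64_t, int64_t,
//                                  std::hash<int64_t>,
//                                  std::equal_to<int64_t>,
//                                  std::allocator<char>,
//                                  AtomicHashArrayQuadraticProbeFcn> QuadAHA;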
// Enables specializing checkLegalKey without specializing its class.
namespace detail {

// Local copy of folly::gen::Identity, to avoid heavy dependencies.
class AHAIdentity {
 public:
  template <class Value>
  auto operator()(Value&& value) const ->
      decltype(std::forward<Value>(value)) {
    return std::forward<Value>(value);
  }
};
template <typename NotKeyT, typename KeyT>
inline void checkLegalKeyIfKeyTImpl(NotKeyT ignored, KeyT emptyKey,
                                    KeyT lockedKey, KeyT erasedKey) {
  // No-op: a lookup key of a type other than KeyT cannot collide with the
  // sentinel keys, so there is nothing to check.
}
template <typename KeyT>
inline void checkLegalKeyIfKeyTImpl(KeyT key_in, KeyT emptyKey,
                                    KeyT lockedKey, KeyT erasedKey) {
  DCHECK_NE(key_in, emptyKey);
  DCHECK_NE(key_in, lockedKey);
  DCHECK_NE(key_in, erasedKey);
}

} // namespace detail
template <class KeyT, class ValueT,
          class HashFcn = std::hash<KeyT>,
          class EqualFcn = std::equal_to<KeyT>,
          class Allocator = std::allocator<char>,
          class ProbeFcn = AtomicHashArrayLinearProbeFcn,
          class KeyConvertFcn = detail::AHAIdentity>
class AtomicHashMap;
template <class KeyT, class ValueT,
          class HashFcn = std::hash<KeyT>,
          class EqualFcn = std::equal_to<KeyT>,
          class Allocator = std::allocator<char>,
          class ProbeFcn = AtomicHashArrayLinearProbeFcn,
          class KeyConvertFcn = detail::AHAIdentity>
class AtomicHashArray : boost::noncopyable {
  static_assert((std::is_convertible<KeyT, int32_t>::value ||
                 std::is_convertible<KeyT, int64_t>::value ||
                 std::is_convertible<KeyT, const void*>::value),
                "You are trying to use AtomicHashArray with disallowed key "
                "types. You must use atomically compare-and-swappable integer "
                "keys, or a different container class.");

 public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef HashFcn hasher;
  typedef EqualFcn key_equal;
  typedef KeyConvertFcn key_convert;
  typedef std::pair<const KeyT, ValueT> value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef value_type* pointer;
  typedef const value_type* const_pointer;
  const size_t capacity_;
  const size_t maxEntries_;
  const KeyT kEmptyKey_;
  const KeyT kLockedKey_;
  const KeyT kErasedKey_;
  template <class ContT, class IterVal>
  struct aha_iterator;

  typedef aha_iterator<const AtomicHashArray, const value_type> const_iterator;
  typedef aha_iterator<AtomicHashArray, value_type> iterator;
  // You really shouldn't need this if you use the SmartPtr provided by create,
  // but if you really want to do something crazy like stick the released
  // pointer into a DiscriminatedPtr or something, you'll need this to clean up
  // after yourself.
  static void destroy(AtomicHashArray*);
 private:
  const size_t kAnchorMask_;

  struct Deleter {
    void operator()(AtomicHashArray* ptr) {
      AtomicHashArray::destroy(ptr);
    }
  };

 public:
  typedef std::unique_ptr<AtomicHashArray, Deleter> SmartPtr;
  /*
   * create --
   *
   *   Creates AtomicHashArray objects.  Use instead of constructor/destructor.
   *
   *   We do things this way in order to avoid the perf penalty of a second
   *   pointer indirection when composing these into AtomicHashMap, which needs
   *   to store an array of pointers so that it can perform atomic operations
   *   on them when growing.
   *
   *   Instead of a mess of arguments, we take a max size and a Config struct to
   *   simulate named ctor parameters.  The Config struct has sensible defaults
   *   for everything, but is overloaded - if you specify a positive capacity,
   *   that will be used directly instead of computing it based on
   *   maxLoadFactor.
   *
   *   Create returns an AHA::SmartPtr which is a unique_ptr with a custom
   *   deleter to make sure everything is cleaned up properly.
   */
  struct Config {
    KeyT emptyKey;
    KeyT lockedKey;
    KeyT erasedKey;
    double maxLoadFactor;
    double growthFactor;
    int entryCountThreadCacheSize;
    size_t capacity; // if positive, overrides maxLoadFactor

    // Cannot have constexpr ctor because some compilers rightly complain.
    Config() : emptyKey((KeyT)-1),
               lockedKey((KeyT)-2),
               erasedKey((KeyT)-3),
               maxLoadFactor(0.8),
               growthFactor(-1),
               entryCountThreadCacheSize(1000),
               capacity(0) {}
  };

  // Cannot have pre-instantiated const Config instance because of SIOF.
  static SmartPtr create(size_t maxSize, const Config& c = Config());
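  // Example (an editor's illustrative sketch): overriding the sentinel keys
  // when the defaults ((KeyT)-1/-2/-3) collide with real data.  The three
  // sentinel values must never be inserted as actual keys:
  //
  //   AHA::Config config;
  //   config.emptyKey  = 0;
  //   config.lockedKey = 1;
  //   config.erasedKey = 2;
  //   config.maxLoadFactor = 0.5;
  //   AHA::SmartPtr aha = AHA::create(1000, config);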
  /*
   * find --
   *
   *   Returns the iterator to the element if found, otherwise end().
   *
   *   As an optional feature, the type of the key to look up (LookupKeyT) is
   *   allowed to be different from the type of keys actually stored (KeyT).
   *
   *   This enables use cases where materializing the key is costly and usually
   *   redundant, e.g., canonicalizing/interning a set of strings and being able
   *   to look up by StringPiece.  To use this feature, LookupHashFcn must take
   *   a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
   *   and second parameter, respectively.
   *
   *   See folly/test/AtomicHashArrayTest.cpp for sample usage.
   */
  template <typename LookupKeyT = key_type,
            typename LookupHashFcn = hasher,
            typename LookupEqualFcn = key_equal>
  iterator find(LookupKeyT k) {
    return iterator(this,
        findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
  }

  template <typename LookupKeyT = key_type,
            typename LookupHashFcn = hasher,
            typename LookupEqualFcn = key_equal>
  const_iterator find(LookupKeyT k) const {
    return const_cast<AtomicHashArray*>(this)->
        find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
  }
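  // Example (an editor's illustrative sketch of the heterogeneous-lookup
  // feature described above; the functor names are hypothetical, not part of
  // folly).  KeyT is const char* (an interned string); lookups take a
  // folly::StringPiece without interning it first.  Note the table's own
  // HashFcn must hash the pointed-to string consistently with PieceHash:
  //
  //   struct PieceHash {
  //     size_t operator()(folly::StringPiece s) const {
  //       return folly::hash::fnv64_buf(s.data(), s.size());
  //     }
  //   };
  //   struct PieceEqual {
  //     bool operator()(const char* stored, folly::StringPiece lookup) const {
  //       return folly::StringPiece(stored) == lookup;
  //     }
  //   };
  //   auto it = aha->find<folly::StringPiece, PieceHash, PieceEqual>(
  //       folly::StringPiece("some-key"));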
  /*
   * insert --
   *
   *   Returns a pair with an iterator to the element at r.first and a bool
   *   indicating success.  Retrieve the index with ret.first.getIndex().
   *
   *   Fails on key collision (does not overwrite) or if the map becomes
   *   full, at which point no element is inserted, the iterator is set to
   *   end(), and success is set false.  On collisions, success is set false,
   *   but the iterator is set to the existing entry.
   */
  std::pair<iterator, bool> insert(const value_type& r) {
    return emplace(r.first, r.second);
  }
  std::pair<iterator, bool> insert(value_type&& r) {
    return emplace(r.first, std::move(r.second));
  }
  /*
   * emplace --
   *
   *   Same contract as insert(), but performs in-place construction
   *   of the value type using the specified arguments.
   *
   *   Also, like find(), this method optionally allows 'key_in' to have a type
   *   different from that stored in the table; see find().  If and only if no
   *   equal key is already present, this method converts 'key_in' to a key of
   *   type KeyT using the provided LookupKeyToKeyFcn.
   */
  template <typename LookupKeyT = key_type,
            typename LookupHashFcn = hasher,
            typename LookupEqualFcn = key_equal,
            typename LookupKeyToKeyFcn = key_convert,
            typename... ArgTs>
  std::pair<iterator, bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
    SimpleRetT ret = insertInternal<LookupKeyT,
                                    LookupHashFcn,
                                    LookupEqualFcn,
                                    LookupKeyToKeyFcn>(
        key_in, std::forward<ArgTs>(vCtorArgs)...);
    return std::make_pair(iterator(this, ret.idx), ret.success);
  }
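  // Example (an editor's illustrative sketch; Widget is a hypothetical value
  // type with an (int, int) constructor):
  //
  //   folly::AtomicHashArray<int64_t, Widget>::SmartPtr aha = /* create() */;
  //   aha->emplace(42, 1, 2);  // Widget(1, 2) built in place in the cell,
  //                            // no temporary Widget is copied or moved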
  // Returns the number of elements erased - should never exceed 1.
  size_t erase(KeyT k);

  // Clears all keys and values in the map and resets all counters.  Not thread
  // safe.
  void clear();
  // Exact number of elements in the map - note that readFull() acquires a
  // mutex.  See folly/ThreadCachedInt.h for more details.
  size_t size() const {
    return numEntries_.readFull() -
        numErases_.load(std::memory_order_relaxed);
  }

  bool empty() const { return size() == 0; }
  iterator begin() {
    iterator it(this, 0);
    it.advancePastEmpty();
    return it;
  }
  const_iterator begin() const {
    const_iterator it(this, 0);
    it.advancePastEmpty();
    return it;
  }

  iterator end() { return iterator(this, capacity_); }
  const_iterator end() const { return const_iterator(this, capacity_); }
  // See AtomicHashMap::findAt - access elements directly
  // WARNING: The following 2 functions will fail silently for hashtables
  // with capacity > 2^32.
  iterator findAt(uint32_t idx) {
    DCHECK_LT(idx, capacity_);
    return iterator(this, idx);
  }
  const_iterator findAt(uint32_t idx) const {
    return const_cast<AtomicHashArray*>(this)->findAt(idx);
  }

  iterator makeIter(size_t idx) { return iterator(this, idx); }
  const_iterator makeIter(size_t idx) const {
    return const_iterator(this, idx);
  }
  // The max load factor allowed for this map.
  double maxLoadFactor() const { return ((double) maxEntries_) / capacity_; }

  void setEntryCountThreadCacheSize(uint32_t newSize) {
    numEntries_.setCacheSize(newSize);
    numPendingEntries_.setCacheSize(newSize);
  }

  int getEntryCountThreadCacheSize() const {
    return numEntries_.getCacheSize();
  }
  /* Private data and helper functions... */

 private:
  friend class AtomicHashMap<KeyT,
                             ValueT,
                             HashFcn,
                             EqualFcn,
                             Allocator,
                             ProbeFcn>;

  struct SimpleRetT {
    size_t idx;
    bool success;
    SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
    SimpleRetT() = default;
  };
  template <typename LookupKeyT = key_type,
            typename LookupHashFcn = hasher,
            typename LookupEqualFcn = key_equal,
            typename LookupKeyToKeyFcn = detail::AHAIdentity,
            typename... ArgTs>
  SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs);

  template <typename LookupKeyT = key_type,
            typename LookupHashFcn = hasher,
            typename LookupEqualFcn = key_equal>
  SimpleRetT findInternal(const LookupKeyT key);
  template <typename MaybeKeyT>
  void checkLegalKeyIfKey(MaybeKeyT key) {
    detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_);
  }
  static std::atomic<KeyT>* cellKeyPtr(const value_type& r) {
    // We need some illegal casting here in order to actually store
    // our value_type as a std::pair<const, >.  But a little bit of
    // undefined behavior never hurt anyone ...
    static_assert(sizeof(std::atomic<KeyT>) == sizeof(KeyT),
                  "std::atomic is implemented in an unexpected way for AHM");
    return
        const_cast<std::atomic<KeyT>*>(
            reinterpret_cast<std::atomic<KeyT> const*>(&r.first));
  }
  static KeyT relaxedLoadKey(const value_type& r) {
    return cellKeyPtr(r)->load(std::memory_order_relaxed);
  }

  static KeyT acquireLoadKey(const value_type& r) {
    return cellKeyPtr(r)->load(std::memory_order_acquire);
  }
  // Fun with thread local storage - atomic increment is expensive
  // (relatively), so we accumulate in the thread cache and periodically
  // flush to the actual variable, and walk through the unflushed counts when
  // reading the value, so be careful of calling size() too frequently.  This
  // increases insertion throughput several times over while keeping the count
  // accurate.
  ThreadCachedInt<uint64_t> numEntries_;  // Successful key inserts
  ThreadCachedInt<uint64_t> numPendingEntries_;  // Used by insertInternal
  std::atomic<int64_t> isFull_;  // Used by insertInternal
  std::atomic<int64_t> numErases_;  // Successful key erases

  value_type cells_[0];  // This must be the last field of this class
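  // Layout note (an editor's addition): create() allocates the header and the
  // cell array in a single block of roughly
  //   sizeof(AtomicHashArray) + capacity * sizeof(value_type)
  // bytes, which is why cells_ must remain the last member.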
  // Force constructor/destructor private since create/destroy should be
  // used externally instead.
  AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
                  KeyT erasedKey, double maxLoadFactor, size_t cacheSize);

  ~AtomicHashArray() = default;
  inline void unlockCell(value_type* const cell, KeyT newKey) {
    cellKeyPtr(*cell)->store(newKey, std::memory_order_release);
  }

  inline bool tryLockCell(value_type* const cell) {
    KeyT expect = kEmptyKey_;
    return cellKeyPtr(*cell)->compare_exchange_strong(expect, kLockedKey_,
        std::memory_order_acq_rel);
  }
  template <class LookupKeyT = key_type, class LookupHashFcn = hasher>
  inline size_t keyToAnchorIdx(const LookupKeyT k) const {
    const size_t hashVal = LookupHashFcn()(k);
    const size_t probe = hashVal & kAnchorMask_;
    return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
  }
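  // Worked example (an editor's addition; assumes kAnchorMask_ is
  // nextPowTwo(capacity_) - 1, as set up in AtomicHashArray-inl.h): with
  // capacity_ == 1000, kAnchorMask_ == 1023.  For hashVal == 5000, probe ==
  // (5000 & 1023) == 904 < 1000, so the cheap masked value is used; for
  // hashVal == 2047, probe == 1023 >= 1000, so we fall back to the slower
  // hashVal % capacity_ == 47.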
}; // AtomicHashArray

} // namespace folly

#include <folly/AtomicHashArray-inl.h>

#endif // FOLLY_ATOMICHASHARRAY_H_