/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_ATOMICHASHARRAY_H_
#error "This should only be included by AtomicHashArray.h"
#endif

#include "folly/Bits.h"
#include "folly/detail/AtomicHashUtils.h"

namespace folly {
// AtomicHashArray private constructor --
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
                KeyT erasedKey, double maxLoadFactor, size_t cacheSize)
    : capacity_(capacity),
      maxEntries_(size_t(maxLoadFactor * capacity_ + 0.5)),
      kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
      kAnchorMask_(nextPowTwo(capacity_) - 1), numEntries_(0, cacheSize),
      numPendingEntries_(0, cacheSize), isFull_(0), numErases_(0) {
}
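// Worked example of the initialization above (illustrative numbers only):
// capacity_ == 1000 and maxLoadFactor == 0.8 give
// maxEntries_ == size_t(0.8 * 1000 + 0.5) == 800, and
// kAnchorMask_ == nextPowTwo(1000) - 1 == 1023.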
/*
 * findInternal --
 *
 *   Sets ret.second to value found and ret.index to index
 *   of key and returns true, or if key does not exist returns false and
 *   ret.index is set to capacity_.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findInternal(const KeyT key_in) {
  DCHECK_NE(key_in, kEmptyKey_);
  DCHECK_NE(key_in, kLockedKey_);
  DCHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    const KeyT key = acquireLoadKey(cells_[idx]);
    if (LIKELY(EqualFcn()(key, key_in))) {
      return SimpleRetT(idx, true);
    }
    if (UNLIKELY(key == kEmptyKey_)) {
      // if we hit an empty element, this key does not exist
      return SimpleRetT(capacity_, false);
    }
    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // probed every cell...fail
      return SimpleRetT(capacity_, false);
    }
  }
}
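/*
 * Usage sketch (illustrative only, not part of the library): callers
 * normally reach findInternal() through the public find(), which wraps the
 * returned index in an iterator.  Assuming `aha` is a SmartPtr obtained
 * from create():
 *
 *   auto it = aha->find(42);
 *   if (it != aha->end()) {
 *     auto value = it->second;   // value stored for key 42
 *   }
 */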
/*
 * insertInternal --
 *
 *   Returns false on failure due to key collision or full.
 *   Also sets ret.index to the index of the key.  If the map is full, sets
 *   ret.index = capacity_.  Also sets ret.second to cell value, thus if insert
 *   successful this will be what we just inserted, if there is a key collision
 *   this will be the previously inserted value, and if the map is full it is
 *   default.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
template <class T>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insertInternal(KeyT key_in, T&& value) {
  const short NO_NEW_INSERTS = 1;
  const short NO_PENDING_INSERTS = 2;
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);

  size_t idx = keyToAnchorIdx(key_in);
  size_t numProbes = 0;
  for (;;) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    if (relaxedLoadKey(*cell) == kEmptyKey_) {
      // NOTE: isFull_ is set based on numEntries_.readFast(), so it's
      // possible to insert more than maxEntries_ entries. However, it's not
      // possible to insert past capacity_.
      ++numPendingEntries_;
      if (isFull_.load(std::memory_order_acquire)) {
        --numPendingEntries_;

        // Before deciding whether this insert succeeded, this thread needs to
        // wait until no other thread can add a new entry.

        // Correctness assumes isFull_ is true at this point.  If
        // another thread now does ++numPendingEntries_, we expect it
        // to pass the isFull_.load() test above.  (It shouldn't insert
        // a new entry.)
        FOLLY_SPIN_WAIT(
          isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS
            && numPendingEntries_.readFull() != 0
        );
        isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);

        if (relaxedLoadKey(*cell) == kEmptyKey_) {
          // Don't insert past max load factor
          return SimpleRetT(capacity_, false);
        }
      } else {
        // An unallocated cell. Try once to lock it. If we succeed, insert here.
        // If we fail, fall through to comparison below; maybe the insert that
        // just beat us was for this very key....
        if (tryLockCell(cell)) {
          // Write the value - done before unlocking
          try {
            DCHECK(relaxedLoadKey(*cell) == kLockedKey_);
            /*
             * This happens using the copy constructor because we won't have
             * constructed a lhs to use an assignment operator on when
             * values are being set.
             */
            new (&cell->second) ValueT(std::forward<T>(value));
            unlockCell(cell, key_in); // Sets the new key
          } catch (...) {
            // Transition back to empty key---requires handling
            // locked->empty below.
            unlockCell(cell, kEmptyKey_);
            --numPendingEntries_;
            throw;
          }
          // Direct comparison rather than EqualFcn ok here
          // (we just inserted it)
          DCHECK(relaxedLoadKey(*cell) == key_in);
          --numPendingEntries_;
          ++numEntries_;  // This is a thread cached atomic increment :)
          if (numEntries_.readFast() >= maxEntries_) {
            isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed);
          }
          return SimpleRetT(idx, true);
        }
        --numPendingEntries_;
      }
    }
    DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
    if (kLockedKey_ == acquireLoadKey(*cell)) {
      FOLLY_SPIN_WAIT(
        kLockedKey_ == acquireLoadKey(*cell)
      );
    }

    const KeyT thisKey = acquireLoadKey(*cell);
    if (EqualFcn()(thisKey, key_in)) {
      // Found an existing entry for our key, but we don't overwrite the
      // previous value.
      return SimpleRetT(idx, false);
    } else if (thisKey == kEmptyKey_ || thisKey == kLockedKey_) {
      // We need to try again (i.e., don't increment numProbes or
      // advance idx): this case can happen if the constructor for
      // ValueT threw for this very cell (the rethrow block above).
      continue;
    }

    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // probed every cell...fail
      return SimpleRetT(capacity_, false);
    }

    idx = probeNext(idx, numProbes);
  }
}
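/*
 * Usage sketch (illustrative only, not part of the library): the public
 * insert() wraps insertInternal() and, like std::map::insert, returns a
 * pair<iterator, bool>.  Existing values are never overwritten and the
 * array never grows:
 *
 *   auto ret = aha->insert(std::make_pair(42, 100));
 *   if (!ret.second) {
 *     // Key 42 was already present (ret.first points at the existing
 *     // entry), or the array was full (ret.first == aha->end()).
 *   }
 */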
/*
 * erase --
 *
 *   This will attempt to erase the given key key_in if the key is found. It
 *   returns 1 iff the key was located and marked as erased, and 0 otherwise.
 *
 *   Memory is not freed or reclaimed by erase, i.e. the cell containing the
 *   erased key will never be reused. If there's an associated value, we won't
 *   touch it either.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
erase(KeyT key_in) {
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    KeyT currentKey = acquireLoadKey(*cell);
    if (currentKey == kEmptyKey_ || currentKey == kLockedKey_) {
      // If we hit an empty (or locked) element, this key does not exist. This
      // is similar to how it's handled in find().
      return 0;
    }
    if (EqualFcn()(currentKey, key_in)) {
      // Found an existing entry for our key, attempt to mark it erased.
      // Some other thread may have erased our key, but this is ok.
      KeyT expect = currentKey;
      if (cellKeyPtr(*cell)->compare_exchange_strong(expect, kErasedKey_)) {
        numErases_.fetch_add(1, std::memory_order_relaxed);

        // Even if there's a value in the cell, we won't delete (or even
        // default construct) it because some other thread may be accessing it.
        // Locking it meanwhile won't work either since another thread may be
        // holding a pointer to it.

        // We found the key and successfully erased it.
        return 1;
      }
      // If another thread succeeds in erasing our key, we'll stop our search.
      return 0;
    }

    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // probed every cell...fail
      return 0;
    }
  }
}
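/*
 * Usage sketch (illustrative only): erase() reports whether the key was
 * found and marked erased; the cell itself is never reclaimed or reused:
 *
 *   size_t nErased = aha->erase(42);  // 1 if key 42 was present, else 0
 */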
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
const typename AtomicHashArray<KeyT, ValueT,
      HashFcn, EqualFcn, Allocator>::Config
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
create(size_t maxSize, const Config& c) {
  CHECK_LE(c.maxLoadFactor, 1.0);
  CHECK_GT(c.maxLoadFactor, 0.0);
  CHECK_NE(c.emptyKey, c.lockedKey);
  size_t capacity = size_t(maxSize / c.maxLoadFactor);
  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;

  auto const mem = Allocator().allocate(sz);
  try {
    new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
                              c.maxLoadFactor, c.entryCountThreadCacheSize);
  } catch (...) {
    Allocator().deallocate(mem, sz);
    throw;
  }

  SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));

  /*
   * Mark all cells as empty.
   *
   * Note: we're bending the rules a little here accessing the key
   * element in our cells even though the cell object has not been
   * constructed, and casting them to atomic objects (see cellKeyPtr).
   * (Also, in fact we never actually invoke the value_type
   * constructor.)  This is in order to avoid needing to default
   * construct a bunch of value_type when we first start up: if you
   * have an expensive default constructor for the value type this can
   * noticeably speed construction time for an AHA.
   */
  FOR_EACH_RANGE(i, 0, map->capacity_) {
    cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
      std::memory_order_relaxed);
  }
  return map;
}
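/*
 * Usage sketch (illustrative only; Config's defaults are declared in
 * AtomicHashArray.h):
 *
 *   AtomicHashArray<int32_t, int32_t>::Config config;
 *   config.maxLoadFactor = 0.5;
 *   auto aha = AtomicHashArray<int32_t, int32_t>::create(1000, config);
 *   // aha is a SmartPtr; the array is expected to be released via
 *   // destroy() when the SmartPtr goes out of scope.
 */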
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
destroy(AtomicHashArray* p) {
  assert(p);

  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;

  FOR_EACH_RANGE(i, 0, p->capacity_) {
    if (p->cells_[i].first != p->kEmptyKey_) {
      p->cells_[i].~value_type();
    }
  }
  p->~AtomicHashArray();

  Allocator().deallocate((char *)p, sz);
}
// clear -- clears all keys and values in the map and resets all counters
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
clear() {
  FOR_EACH_RANGE(i, 0, capacity_) {
    if (cells_[i].first != kEmptyKey_) {
      cells_[i].~value_type();
      *const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
    }
    CHECK(cells_[i].first == kEmptyKey_);
  }
  numEntries_.set(0);
  numPendingEntries_.set(0);
  isFull_.store(0, std::memory_order_relaxed);
  numErases_.store(0, std::memory_order_relaxed);
}
// Iterator implementation

template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
template <class ContT, class IterVal>
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
    : boost::iterator_facade<aha_iterator<ContT,IterVal>,
                             IterVal,
                             boost::forward_traversal_tag>
{
  explicit aha_iterator() : aha_(0) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator.  The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template<class OtherContT, class OtherVal>
  aha_iterator(const aha_iterator<OtherContT,OtherVal>& o,
               typename std::enable_if<
               std::is_convertible<OtherVal*,IterVal*>::value >::type* = 0)
      : aha_(o.aha_), offset_(o.offset_) {}

  explicit aha_iterator(ContT* array, size_t offset)
      : aha_(array), offset_(offset) {
    advancePastEmpty();
  }

  // Returns a unique index that can be used with findAt().
  // WARNING: The following function will fail silently for hashtables
  // with capacity > 2^32.
  uint32_t getIndex() const { return offset_; }

 private:
  friend class AtomicHashArray;
  friend class boost::iterator_core_access;

  void increment() {
    ++offset_;
    advancePastEmpty();
  }

  bool equal(const aha_iterator& o) const {
    return aha_ == o.aha_ && offset_ == o.offset_;
  }

  IterVal& dereference() const {
    return aha_->cells_[offset_];
  }

  void advancePastEmpty() {
    while (offset_ < aha_->capacity_ && !isValid()) {
      ++offset_;
    }
  }

  bool isValid() const {
    KeyT key = acquireLoadKey(aha_->cells_[offset_]);
    return key != aha_->kEmptyKey_ &&
      key != aha_->kLockedKey_ &&
      key != aha_->kErasedKey_;
  }

 private:
  ContT* aha_;
  size_t offset_;
}; // aha_iterator
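/*
 * Iteration sketch (illustrative only): the iterator is forward-only and
 * skips empty, locked, and erased cells, so a plain loop visits just the
 * live entries:
 *
 *   for (auto it = aha->begin(); it != aha->end(); ++it) {
 *     doSomethingWith(it->first, it->second);   // doSomethingWith is
 *   }                                           // hypothetical
 */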
} // namespace folly

#undef FOLLY_SPIN_WAIT