/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_ATOMICHASHARRAY_H_
#error "This should only be included by AtomicHashArray.h"
#endif

#include "folly/Bits.h"
#include "folly/detail/AtomicHashUtils.h"

namespace folly {

// AtomicHashArray private constructor --
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
                KeyT erasedKey, double maxLoadFactor, size_t cacheSize)
    : capacity_(capacity), maxEntries_(size_t(maxLoadFactor * capacity_ + 0.5)),
      kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
      kAnchorMask_(nextPowTwo(capacity_) - 1), numEntries_(0, cacheSize),
      numPendingEntries_(0, cacheSize), isFull_(0), numErases_(0) {
}

/*
 * findInternal --
 *
 *   On success, sets ret.index to the index of the key and returns true;
 *   if the key does not exist, returns false and sets ret.index to
 *   capacity_.
 */
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
findInternal(const KeyT key_in) {
  DCHECK_NE(key_in, kEmptyKey_);
  DCHECK_NE(key_in, kLockedKey_);
  DCHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    const KeyT key = acquireLoadKey(cells_[idx]);
    if (LIKELY(EqualFcn()(key, key_in))) {
      return SimpleRetT(idx, true);
    }
    if (UNLIKELY(key == kEmptyKey_)) {
      // if we hit an empty element, this key does not exist
      return SimpleRetT(capacity_, false);
    }
    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // probed every cell...fail
      return SimpleRetT(capacity_, false);
    }
  }
}
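
/*
 * Usage sketch (editorial, not part of the library): consuming the
 * SimpleRetT returned by findInternal(). The field names idx/success are
 * an assumption based on the constructor calls above; the comments in this
 * file also refer to them as ret.index/ret.second.
 *
 *   SimpleRetT ret = findInternal(key);
 *   if (ret.success) {
 *     ValueT& v = cells_[ret.idx].second;  // live entry at the found slot
 *   } else {
 *     DCHECK_EQ(ret.idx, capacity_);       // capacity_ is the miss sentinel
 *   }
 */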

/*
 * insertInternal --
 *
 *   Returns false on failure due to key collision or a full map. Also sets
 *   ret.index to the index of the key. If the map is full, sets
 *   ret.index = capacity_. Also sets ret.second to the cell value: if the
 *   insert was successful this is what we just inserted; if there is a key
 *   collision it is the previously inserted value; and if the map is full
 *   it is default.
 */
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
template <class T>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
insertInternal(KeyT key_in, T&& value) {
  const short NO_NEW_INSERTS = 1;
  const short NO_PENDING_INSERTS = 2;
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);

  size_t idx = keyToAnchorIdx(key_in);
  size_t numProbes = 0;
  for (;;) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    if (relaxedLoadKey(*cell) == kEmptyKey_) {
      // NOTE: isFull_ is set based on numEntries_.readFast(), so it's
      // possible to insert more than maxEntries_ entries. However, it's not
      // possible to insert past capacity_.
      ++numPendingEntries_;
      if (isFull_.load(std::memory_order_acquire)) {
        --numPendingEntries_;

        // Before deciding whether this insert succeeded, this thread needs to
        // wait until no other thread can add a new entry.

        // Correctness assumes isFull_ is true at this point. If
        // another thread now does ++numPendingEntries_, we expect it
        // to pass the isFull_.load() test above. (It shouldn't insert
        // a new entry.)
        FOLLY_SPIN_WAIT(
          isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS
            && numPendingEntries_.readFull() != 0
        );
        isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);

        if (relaxedLoadKey(*cell) == kEmptyKey_) {
          // Don't insert past max load factor
          return SimpleRetT(capacity_, false);
        }
      } else {
        // An unallocated cell. Try once to lock it. If we succeed, insert
        // here. If we fail, fall through to the comparison below; maybe the
        // insert that just beat us was for this very key....
        if (tryLockCell(cell)) {
          // Write the value - done before unlocking
          try {
            DCHECK(relaxedLoadKey(*cell) == kLockedKey_);
            /*
             * This happens using the copy constructor because we won't have
             * constructed a lhs to use an assignment operator on when
             * values are being set.
             */
            new (&cell->second) ValueT(std::forward<T>(value));
            unlockCell(cell, key_in); // Sets the new key
          } catch (...) {
            // Transition back to empty key---requires handling
            // locked->empty below.
            unlockCell(cell, kEmptyKey_);
            --numPendingEntries_;
            throw;
          }
          // Direct comparison rather than EqualFcn ok here
          // (we just inserted it)
          DCHECK(relaxedLoadKey(*cell) == key_in);
          --numPendingEntries_;
          ++numEntries_; // This is a thread-cached atomic increment :)
          if (numEntries_.readFast() >= maxEntries_) {
            isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed);
          }
          return SimpleRetT(idx, true);
        }
        --numPendingEntries_;
      }
    }
    DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
    if (kLockedKey_ == acquireLoadKey(*cell)) {
      FOLLY_SPIN_WAIT(
        kLockedKey_ == acquireLoadKey(*cell)
      );
    }

    const KeyT thisKey = acquireLoadKey(*cell);
    if (EqualFcn()(thisKey, key_in)) {
      // Found an existing entry for our key, but we don't overwrite the
      // previous value.
      return SimpleRetT(idx, false);
    } else if (thisKey == kEmptyKey_ || thisKey == kLockedKey_) {
      // We need to try again (i.e., don't increment numProbes or
      // advance idx): this case can happen if the constructor for
      // ValueT threw for this very cell (the rethrow block above).
      continue;
    }

    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // probed every cell...fail
      return SimpleRetT(capacity_, false);
    }

    idx = probeNext(idx, numProbes);
  }
}
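
/*
 * Usage sketch (editorial): the three outcomes insertInternal() can report
 * under the contract above. SimpleRetT field names are an assumption, as in
 * the sketch after findInternal().
 *
 *   SimpleRetT ret = insertInternal(key, ValueT(v));
 *   if (ret.idx == capacity_) {
 *     // Map is full: nothing was inserted.
 *   } else if (!ret.success) {
 *     // Key collision: cells_[ret.idx] still holds the earlier value.
 *   } else {
 *     // Fresh insert: cells_[ret.idx].second is the value just written.
 *   }
 */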

/*
 * erase --
 *
 *   This will attempt to erase the given key key_in if the key is found. It
 *   returns 1 iff the key was located and marked as erased, and 0 otherwise.
 *
 *   Memory is not freed or reclaimed by erase, i.e. the cell containing the
 *   erased key will never be reused. If there's an associated value, we
 *   won't touch it either.
 */
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
erase(KeyT key_in) {
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    KeyT currentKey = acquireLoadKey(*cell);
    if (currentKey == kEmptyKey_ || currentKey == kLockedKey_) {
      // If we hit an empty (or locked) element, this key does not exist.
      // This is similar to how it's handled in find().
      return 0;
    }
    if (EqualFcn()(currentKey, key_in)) {
      // Found an existing entry for our key, attempt to mark it erased.
      // Some other thread may have erased our key, but this is ok.
      KeyT expect = currentKey;
      if (cellKeyPtr(*cell)->compare_exchange_strong(expect, kErasedKey_)) {
        numErases_.fetch_add(1, std::memory_order_relaxed);

        // Even if there's a value in the cell, we won't delete (or even
        // default construct) it because some other thread may be accessing
        // it. Locking it meanwhile won't work either since another thread
        // may be holding a pointer to it.

        // We found the key and successfully erased it.
        return 1;
      }
      // If another thread succeeds in erasing our key, we'll stop our search.
      return 0;
    }
    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // probed every cell...fail
      return 0;
    }
  }
}
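
/*
 * Usage sketch (editorial): erase() only swaps the key to kErasedKey_ and
 * never reclaims the cell, so tombstones accumulate; a workload that keeps
 * inserting and erasing distinct keys will eventually probe-fail even with
 * few live entries.
 *
 *   size_t n = aha->erase(k);  // 1 iff this call marked k erased
 *   size_t m = aha->erase(k);  // 0: the tombstone no longer matches k
 */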

template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
const typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::Config
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::defaultConfig;

template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
create(size_t maxSize, const Config& c) {
  CHECK_LE(c.maxLoadFactor, 1.0);
  CHECK_GT(c.maxLoadFactor, 0.0);
  CHECK_NE(c.emptyKey, c.lockedKey);
  size_t capacity = size_t(maxSize / c.maxLoadFactor);
  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;

  std::unique_ptr<void, void(*)(void*)> mem(malloc(sz), free);
  new (mem.get()) AtomicHashArray(capacity, c.emptyKey, c.lockedKey,
                                  c.erasedKey, c.maxLoadFactor,
                                  c.entryCountThreadCacheSize);
  SmartPtr map(static_cast<AtomicHashArray*>(mem.release()));

  /*
   * Mark all cells as empty.
   *
   * Note: we're bending the rules a little here accessing the key
   * element in our cells even though the cell object has not been
   * constructed, and casting them to atomic objects (see cellKeyPtr).
   * (Also, in fact we never actually invoke the value_type
   * constructor.) This is in order to avoid needing to default
   * construct a bunch of value_type when we first start up: if you
   * have an expensive default constructor for the value type this can
   * noticeably speed construction time for an AHA.
   */
  FOR_EACH_RANGE(i, 0, map->capacity_) {
    cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
                                      std::memory_order_relaxed);
  }
  return map;
}
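
/*
 * Construction sketch (editorial): create() is the only way to build an AHA
 * because the object header and the cell array share one malloc'd block.
 * The key/value types below are illustrative, and the HashFcn/EqualFcn
 * template defaults are assumed from AtomicHashArray.h.
 *
 *   typedef AtomicHashArray<int32_t, int32_t> AHA;
 *   AHA::Config config;
 *   config.maxLoadFactor = 0.5;  // final capacity becomes maxSize / 0.5
 *   AHA::SmartPtr aha = AHA::create(4096, config);
 *   // SmartPtr presumably routes its deleter through destroy() below,
 *   // pairing this malloc with that free.
 */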

template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
destroy(AtomicHashArray* p) {
  assert(p);
  FOR_EACH_RANGE(i, 0, p->capacity_) {
    if (p->cells_[i].first != p->kEmptyKey_) {
      // Only cells that were actually populated hold a live value_type.
      p->cells_[i].~value_type();
    }
  }
  p->~AtomicHashArray();
  free(p); // Matches the malloc in create().
}

// clear -- clears all keys and values in the map and resets all counters
template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
clear() {
  FOR_EACH_RANGE(i, 0, capacity_) {
    if (cells_[i].first != kEmptyKey_) {
      cells_[i].~value_type();
      *const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
    }
    CHECK(cells_[i].first == kEmptyKey_);
  }
  numEntries_.set(0);
  numPendingEntries_.set(0);
  isFull_.store(0, std::memory_order_relaxed);
  numErases_.store(0, std::memory_order_relaxed);
}

// Iterator implementation

template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
template <class ContT, class IterVal>
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::aha_iterator
    : boost::iterator_facade<aha_iterator<ContT,IterVal>,
                             IterVal,
                             boost::forward_traversal_tag>
{
  explicit aha_iterator() : aha_(0) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template<class OtherContT, class OtherVal>
  aha_iterator(const aha_iterator<OtherContT,OtherVal>& o,
               typename std::enable_if<
                 std::is_convertible<OtherVal*,IterVal*>::value >::type* = 0)
      : aha_(o.aha_)
      , offset_(o.offset_)
  {}

  explicit aha_iterator(ContT* array, size_t offset)
      : aha_(array)
      , offset_(offset)
  {
    advancePastEmpty();
  }

  // Returns a unique index that can be used with findAt().
  // WARNING: The following function will fail silently for hashtables
  //          with capacity > 2^32.
  uint32_t getIndex() const { return offset_; }

 private:
  friend class AtomicHashArray;
  friend class boost::iterator_core_access;

  void increment() {
    ++offset_;
    advancePastEmpty();
  }

  bool equal(const aha_iterator& o) const {
    return aha_ == o.aha_ && offset_ == o.offset_;
  }

  IterVal& dereference() const {
    return aha_->cells_[offset_];
  }

  void advancePastEmpty() {
    while (offset_ < aha_->capacity_ && !isValid()) {
      ++offset_;
    }
  }

  bool isValid() const {
    KeyT key = acquireLoadKey(aha_->cells_[offset_]);
    return key != aha_->kEmptyKey_ &&
           key != aha_->kLockedKey_ &&
           key != aha_->kErasedKey_;
  }

 private:
  ContT* aha_;
  size_t offset_;
}; // aha_iterator
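
/*
 * Iteration sketch (editorial; begin()/end() are assumed wrappers from
 * AtomicHashArray.h, and process() is a placeholder): advancePastEmpty()
 * means a plain loop visits only live entries, skipping empty, locked,
 * and erased cells.
 *
 *   for (AHA::iterator it = aha->begin(); it != aha->end(); ++it) {
 *     uint32_t idx = it.getIndex();    // stable handle, usable with findAt()
 *     process(it->first, it->second);  // value_type behaves like a pair
 *   }
 */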

} // namespace folly

#undef FOLLY_SPIN_WAIT