/*
 * Copyright 2012 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif

#include "folly/detail/AtomicHashUtils.h"

namespace folly {

template <class KeyT, class ValueT, class HashFcn>
const typename AtomicHashMap<KeyT, ValueT, HashFcn>::Config
AtomicHashMap<KeyT, ValueT, HashFcn>::defaultConfig;

// AtomicHashMap constructor -- atomic wrapper that allows growth.
// This class carries significant per-instance overhead (184 bytes), so
// only use it for large maps.
template <typename KeyT, typename ValueT, typename HashFcn>
AtomicHashMap<KeyT, ValueT, HashFcn>::
AtomicHashMap(size_t size, const Config& config)
  : kGrowthFrac_(1.0 - config.maxLoadFactor) {
  CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
  subMaps_[0].store(SubMap::create(size, config).release(),
    std::memory_order_relaxed);
  auto numSubMaps = kNumSubMaps_;
  FOR_EACH_RANGE(i, 1, numSubMaps) {
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}
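
// A minimal construction sketch (illustrative only; the key/value types,
// size hint, and load factor below are assumptions, not defaults):
//
//   AtomicHashMap<int64_t, int64_t>::Config config;
//   config.maxLoadFactor = 0.75;
//   AtomicHashMap<int64_t, int64_t> ahm(100000, config);  // ~100k elements
//
// With maxLoadFactor = 0.75, the kGrowthFrac_ computed above is 0.25, so
// each overflow submap is sized to roughly a quarter of the cells already
// allocated (see insertInternal() below).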

// insert --
template <typename KeyT, typename ValueT, typename HashFcn>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn>::
insert(const value_type& r) {
  SimpleRetT ret = insertInternal(r);
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                        ret.success);
}
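
// A minimal sketch of the insert() contract, continuing the ahm example
// above (key and value are assumptions): as with std::map, the returned
// pair carries an iterator to the element plus a bool that is true only
// if this call performed the insertion.
//
//   auto ret = ahm.insert(std::make_pair(42, 1));
//   if (!ret.second) {
//     // Key 42 was already present; ret.first points at the existing entry.
//   }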

// insertInternal -- Allocates new sub maps as existing ones fill up.
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn>::
insertInternal(const value_type& r) {
 beginInsertInternal:
  int nextMapIdx =  // this maintains our state across goto retries
    numMapsAllocated_.load(std::memory_order_acquire);

  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE(i, 0, nextMapIdx) {
    // Try to insert into each map successively. If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->insertInternal(r);
    if (ret.idx == subMap->capacity_) {
      continue;  // map is full, so try the next one
    }
    // Either collision or success - the insert is done in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }

  // If we made it this far, all maps are full and we need to try to
  // allocate the next one.

  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }

  if (tryLockMap(nextMapIdx)) {
    // Alloc a new map and shove it in. We can change whatever we want
    // because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)
      (primarySubMap->capacity_ *
       std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = (int) (numCellsAllocated * kGrowthFrac_);
    DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
      (SubMap*)kLockedPtr_);
    // Create a new map using the settings stored in the first map.

    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
      primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
      std::memory_order_relaxed);

    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(nextMapIdx + 1,
              numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    FOLLY_SPIN_WAIT(
      nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire)
    );
  }

  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(r);
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full... try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}
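
// Worked example of the growth arithmetic above (numbers are assumptions,
// not defaults): with maxLoadFactor = 0.8, kGrowthFrac_ = 0.2. If the
// primary map was created with capacity_ = 1000, then:
//
//   nextMapIdx = 1: numCellsAllocated = 1000 * 1.2^0 = 1000, newSize = 200
//   nextMapIdx = 2: numCellsAllocated = 1000 * 1.2^1 = 1200, newSize = 240
//   nextMapIdx = 3: numCellsAllocated = 1000 * 1.2^2 = 1440, newSize = 288
//
// That is, each secondary map is sized to kGrowthFrac_ of the total cells
// allocated so far, so overall capacity grows geometrically while each new
// submap stays proportional to what already exists.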

// find --
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::iterator
AtomicHashMap<KeyT, ValueT, HashFcn>::
find(const KeyT k) {
  SimpleRetT ret = findInternal(k);
  if (ret.i >= numMapsAllocated_.load(std::memory_order_acquire)) {
    return end();
  }
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return iterator(this, ret.i, subMap->makeIter(ret.j));
}

template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn>::
find(const KeyT k) const {
  return const_cast<AtomicHashMap*>(this)->find(k);
}
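
// A minimal lookup sketch, reusing the ahm example above (the key is an
// assumption): find() returns end() when the key is absent, so the usual
// std::map idiom applies.
//
//   auto it = ahm.find(42);
//   if (it != ahm.end()) {
//     // it->first is the key, it->second the value.
//   }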

// findInternal --
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn>::
findInternal(const KeyT k) const {
  SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
  typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
  if (LIKELY(ret.idx != primaryMap->capacity_)) {
    return SimpleRetT(0, ret.idx, ret.success);
  }
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 1, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = thisMap->findInternal(k);
    if (LIKELY(ret.idx != thisMap->capacity_)) {
      return SimpleRetT(i, ret.idx, ret.success);
    }
  }
  // Didn't find our key...
  return SimpleRetT(numMaps, 0, false);
}

// findAtInternal -- see encodeIndex() for details.
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn>::
findAtInternal(uint32_t idx) const {
  uint32_t subMapIdx, subMapOffset;
  if (idx & kSecondaryMapBit_) {
    // idx falls in a secondary map
    idx &= ~kSecondaryMapBit_;  // unset secondary bit
    subMapIdx = idx >> kSubMapIndexShift_;
    DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
    subMapOffset = idx & kSubMapIndexMask_;
  } else {
    // idx falls in the primary map; the offset is the index itself
    subMapIdx = 0;
    subMapOffset = idx;
  }
  return SimpleRetT(subMapIdx, subMapOffset, true);
}
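
// Worked decode example (assuming kSecondaryMapBit_ = 1u << 31 and
// kSubMapIndexShift_ = 27, per the encodeIndex() bit layout below):
//
//   idx = 0x90000005 -> secondary bit set; after clearing it, 0x10000005.
//   subMapIdx    = 0x10000005 >> 27 = 2
//   subMapOffset = 0x10000005 & kSubMapIndexMask_ = 5
//
// i.e. the element lives at offset 5 of subMaps_[2].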

// erase --
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::size_type
AtomicHashMap<KeyT, ValueT, HashFcn>::
erase(const KeyT k) {
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
      return 1;
    }
  }
  // Didn't find our key...
  return 0;
}
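
// Usage note: erase() returns the number of elements removed (0 or 1),
// matching the std::map convention. For example (key is an assumption):
//
//   size_t numErased = ahm.erase(42);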

// capacity -- summation of the capacities of all submaps
template <typename KeyT, typename ValueT, typename HashFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
capacity() const {
  size_t totalCap(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
  }
  return totalCap;
}

// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <typename KeyT, typename ValueT, typename HashFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
spaceRemaining() const {
  size_t spaceRem(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    spaceRem += std::max(
      0,
      thisMap->maxEntries_ - thisMap->numEntries_.readFull()
    );
  }
  return spaceRem;
}
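
// Worked example (values are assumptions): a submap created with
// capacity_ = 1000 and maxLoadFactor = 0.8 has maxEntries_ = 800. If
// numEntries_.readFull() currently reports 750, that submap contributes
// max(0, 800 - 750) = 50 to spaceRemaining(). The max() guards against a
// transiently negative difference, since readFull() aggregates per-thread
// counters and the entry count can briefly exceed maxEntries_.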

// clear -- Wipes all keys and values from the primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT, typename ValueT, typename HashFcn>
void AtomicHashMap<KeyT, ValueT, HashFcn>::
clear() {
  subMaps_[0].load(std::memory_order_relaxed)->clear();
  int const numMaps = numMapsAllocated_
    .load(std::memory_order_relaxed);
  FOR_EACH_RANGE(i, 1, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    DCHECK(thisMap);
    SubMap::destroy(thisMap);
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}

// size --
template <typename KeyT, typename ValueT, typename HashFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
size() const {
  size_t totalSize(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
  }
  return totalSize;
}

// encodeIndex -- Encode the submap index and the offset within that
// submap into a single 32-bit index.
//
// The offset is returned untouched when it refers to the primary map, so
// primary-map offsets can be as large as possible (31 data bits). The max
// size of the secondary maps is limited by what fits in the low 27 bits.
//
// Bit layout of the returned index:
//   if subMap == 0 (primary map) =>
//     bit(s)  value
//       31    0
//     0-30    submap offset
//
//   if subMap > 0 (secondary maps) =>
//     bit(s)  value
//       31    1
//    27-30    which subMap
//     0-26    subMap offset
template <typename KeyT, typename ValueT, typename HashFcn>
inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn>::
encodeIndex(uint32_t subMap, uint32_t offset) {
  DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
  if (subMap == 0) return offset;
  // Make sure subMap isn't too big
  DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
  // Make sure the subMap bits of offset are clear
  DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);

  // Set high-order bits to encode which submap this index belongs to
  return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}
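
// Worked encoding example (assuming kSubMapIndexShift_ = 27 and
// kSecondaryMapBit_ = 1u << 31, per the bit layout above):
//
//   encodeIndex(0, 5) == 5            // primary map: offset passes through
//   encodeIndex(2, 5) == 0x90000005   // 5 | (2 << 27) | (1u << 31)
//
// findAtInternal() reverses this transformation.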

// Iterator implementation

template <typename KeyT, typename ValueT, typename HashFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn>::ahm_iterator
    : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
                             IterVal,
                             boost::forward_traversal_tag> {
  explicit ahm_iterator() : ahm_(nullptr) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template <class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
               typename std::enable_if<
                 std::is_convertible<OtherSubIt, SubIt>::value>::type* = 0)
      : ahm_(o.ahm_),
        subMap_(o.subMap_),
        subIt_(o.subIt_) {}

  /**
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  explicit ahm_iterator(ContT* ahm,
                        uint32_t subMap,
                        const SubIt& subIt)
      : ahm_(ahm),
        subMap_(subMap),
        subIt_(subIt) {
    checkAdvanceToNextSubmap();
  }

  friend class boost::iterator_core_access;

  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    checkAdvanceToNextSubmap();
  }

  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      return false;
    }

    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }

    return subMap_ == other.subMap_ &&
           subIt_ == other.subIt_;
  }

  IterVal& dereference() const {
    return *subIt_;
  }

  bool isEnd() const { return ahm_ == nullptr; }

  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }

    SubMap* thisMap = ahm_->subMaps_[subMap_].
      load(std::memory_order_relaxed);
    if (subIt_ == thisMap->end()) {
      // This sub iterator is done; advance to the next submap.
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();
      } else {
        ahm_ = nullptr;  // set to end()
      }
    }
  }

 private:
  ContT* ahm_;
  uint32_t subMap_;
  SubIt subIt_;
};  // ahm_iterator

}  // namespace folly

#undef FOLLY_SPIN_WAIT
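
// A minimal iteration sketch (illustrative; the map and the hypothetical
// doSomething() are assumptions). Iterators are forward-only and walk the
// primary map first, then each secondary map in allocation order:
//
//   for (auto it = ahm.begin(); it != ahm.end(); ++it) {
//     doSomething(it->first, it->second);
//   }
//
// it.getIndex() yields a stable 32-bit handle (see encodeIndex()) that the
// findAtInternal() path can later decode back into a submap/offset pair.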