make io::Cursor::push() safe to call with an empty ByteRange
[folly.git] / folly / AtomicHashMap-inl.h
1 /*
2  * Copyright 2017 Facebook, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *   http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #ifndef FOLLY_ATOMICHASHMAP_H_
18 #error "This should only be included by AtomicHashMap.h"
19 #endif
20
21 #include <folly/detail/AtomicHashUtils.h>
22
23 namespace folly {
24
25 // AtomicHashMap constructor -- Atomic wrapper that allows growth
26 // This class has a lot of overhead (184 Bytes) so only use for big maps
27 template <
28     typename KeyT,
29     typename ValueT,
30     typename HashFcn,
31     typename EqualFcn,
32     typename Allocator,
33     typename ProbeFcn,
34     typename KeyConvertFcn>
35 AtomicHashMap<
36     KeyT,
37     ValueT,
38     HashFcn,
39     EqualFcn,
40     Allocator,
41     ProbeFcn,
42     KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
43     : kGrowthFrac_(
44           config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
45                                   : config.growthFactor) {
46   CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
47   subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
48     std::memory_order_relaxed);
49   auto subMapCount = kNumSubMaps_;
50   FOR_EACH_RANGE(i, 1, subMapCount) {
51     subMaps_[i].store(nullptr, std::memory_order_relaxed);
52   }
53   numMapsAllocated_.store(1, std::memory_order_relaxed);
54 }
55
56 // emplace --
57 template <typename KeyT,
58           typename ValueT,
59           typename HashFcn,
60           typename EqualFcn,
61           typename Allocator,
62           typename ProbeFcn,
63           typename KeyConvertFcn>
64 template <typename LookupKeyT,
65           typename LookupHashFcn,
66           typename LookupEqualFcn,
67           typename LookupKeyToKeyFcn,
68           typename... ArgTs>
69 std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
70                                  ProbeFcn, KeyConvertFcn>::iterator, bool>
71 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
72               Allocator, ProbeFcn, KeyConvertFcn>::
73 emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
74   SimpleRetT ret = insertInternal<LookupKeyT,
75                                   LookupHashFcn,
76                                   LookupEqualFcn,
77                                   LookupKeyToKeyFcn>(
78       k, std::forward<ArgTs>(vCtorArgs)...);
79   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
80   return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
81                         ret.success);
82 }
83
// insertInternal -- Allocates new sub maps as existing ones fill up.
//
// Tries each already-published sub map in order.  If all are full, the
// calling thread races (via tryLockMap) to allocate the next sub map,
// sized relative to the primary map by kGrowthFrac_, then retries the
// insert there.  Throws AtomicHashMapFullError when no further sub map
// may be allocated.  Returns {subMapIdx, slotIdx, success}.
template <typename KeyT,
          typename ValueT,
          typename HashFcn,
          typename EqualFcn,
          typename Allocator,
          typename ProbeFcn,
          typename KeyConvertFcn>
template <typename LookupKeyT,
          typename LookupHashFcn,
          typename LookupEqualFcn,
          typename LookupKeyToKeyFcn,
          typename... ArgTs>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
                       Allocator, ProbeFcn, KeyConvertFcn>::
    SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
              Allocator, ProbeFcn, KeyConvertFcn>::
insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
 beginInsertInternal:
  // Retried from the goto at the bottom if a freshly allocated sub map
  // fills up before we manage to insert into it.
  auto nextMapIdx = // this maintains our state
    numMapsAllocated_.load(std::memory_order_acquire);
  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE(i, 0, nextMapIdx) {
    // insert in each map successively.  If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->template insertInternal<LookupKeyT,
                                          LookupHashFcn,
                                          LookupEqualFcn,
                                          LookupKeyToKeyFcn>(
        key, std::forward<ArgTs>(vCtorArgs)...);
    if (ret.idx == subMap->capacity_) {
      continue;  //map is full, so try the next one
    }
    // Either collision or success - insert in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }

  // If we made it this far, all maps are full and we need to try to allocate
  // the next one.

  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }

  if (tryLockMap(nextMapIdx)) {
    // We won the race: this thread alone allocates the new sub map.
    // Alloc a new map and shove it in.  We can change whatever
    // we want because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)
      (primarySubMap->capacity_ *
       std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
    DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
      (SubMap*)kLockedPtr_);
    // create a new map using the settings stored in the first map

    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
      primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
      std::memory_order_relaxed);

    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(nextMapIdx + 1,
      numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    detail::atomic_hash_spin_wait([&] {
      return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
    });
  }

  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full...try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}
177
178 // find --
179 template <typename KeyT,
180           typename ValueT,
181           typename HashFcn,
182           typename EqualFcn,
183           typename Allocator,
184           typename ProbeFcn,
185           typename KeyConvertFcn>
186 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
187 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
188                        Allocator, ProbeFcn, KeyConvertFcn>::
189     iterator
190 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
191               Allocator, ProbeFcn, KeyConvertFcn>::find(
192     LookupKeyT k) {
193   SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
194   if (!ret.success) {
195     return end();
196   }
197   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
198   return iterator(this, ret.i, subMap->makeIter(ret.j));
199 }
200
201 template <typename KeyT,
202           typename ValueT,
203           typename HashFcn,
204           typename EqualFcn,
205           typename Allocator,
206           typename ProbeFcn,
207           typename KeyConvertFcn>
208 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
209 typename AtomicHashMap<KeyT, ValueT,
210          HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
211 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
212               Allocator, ProbeFcn, KeyConvertFcn>::
213 find(LookupKeyT k) const {
214   return const_cast<AtomicHashMap*>(this)->find<LookupKeyT,
215                                                 LookupHashFcn,
216                                                 LookupEqualFcn>(k);
217 }
218
219 // findInternal --
220 template <typename KeyT,
221           typename ValueT,
222           typename HashFcn,
223           typename EqualFcn,
224           typename Allocator,
225           typename ProbeFcn,
226           typename KeyConvertFcn>
227 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
228 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
229                        Allocator, ProbeFcn, KeyConvertFcn>::
230     SimpleRetT
231 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
232               Allocator, ProbeFcn, KeyConvertFcn>::
233     findInternal(const LookupKeyT k) const {
234   SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
235   typename SubMap::SimpleRetT ret =
236     primaryMap->template findInternal<LookupKeyT,
237                                       LookupHashFcn,
238                                       LookupEqualFcn>(k);
239   if (LIKELY(ret.idx != primaryMap->capacity_)) {
240     return SimpleRetT(0, ret.idx, ret.success);
241   }
242   const unsigned int numMaps =
243       numMapsAllocated_.load(std::memory_order_acquire);
244   FOR_EACH_RANGE(i, 1, numMaps) {
245     // Check each map successively.  If one succeeds, we're done!
246     SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
247     ret = thisMap->template findInternal<LookupKeyT,
248                                          LookupHashFcn,
249                                          LookupEqualFcn>(k);
250     if (LIKELY(ret.idx != thisMap->capacity_)) {
251       return SimpleRetT(i, ret.idx, ret.success);
252     }
253   }
254   // Didn't find our key...
255   return SimpleRetT(numMaps, 0, false);
256 }
257
258 // findAtInternal -- see encodeIndex() for details.
259 template <typename KeyT,
260           typename ValueT,
261           typename HashFcn,
262           typename EqualFcn,
263           typename Allocator,
264           typename ProbeFcn,
265           typename KeyConvertFcn>
266 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
267                        Allocator, ProbeFcn, KeyConvertFcn>::
268     SimpleRetT
269 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
270               Allocator, ProbeFcn, KeyConvertFcn>::
271 findAtInternal(uint32_t idx) const {
272   uint32_t subMapIdx, subMapOffset;
273   if (idx & kSecondaryMapBit_) {
274     // idx falls in a secondary map
275     idx &= ~kSecondaryMapBit_;  // unset secondary bit
276     subMapIdx = idx >> kSubMapIndexShift_;
277     DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
278     subMapOffset = idx & kSubMapIndexMask_;
279   } else {
280     // idx falls in primary map
281     subMapIdx = 0;
282     subMapOffset = idx;
283   }
284   return SimpleRetT(subMapIdx, subMapOffset, true);
285 }
286
287 // erase --
288 template <typename KeyT,
289           typename ValueT,
290           typename HashFcn,
291           typename EqualFcn,
292           typename Allocator,
293           typename ProbeFcn,
294           typename KeyConvertFcn>
295 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
296                        Allocator, ProbeFcn, KeyConvertFcn>::
297     size_type
298 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
299               Allocator, ProbeFcn, KeyConvertFcn>::
300 erase(const KeyT k) {
301   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
302   FOR_EACH_RANGE(i, 0, numMaps) {
303     // Check each map successively.  If one succeeds, we're done!
304     if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
305       return 1;
306     }
307   }
308   // Didn't find our key...
309   return 0;
310 }
311
312 // capacity -- summation of capacities of all submaps
313 template <typename KeyT,
314           typename ValueT,
315           typename HashFcn,
316           typename EqualFcn,
317           typename Allocator,
318           typename ProbeFcn,
319           typename KeyConvertFcn>
320 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
321                      Allocator, ProbeFcn, KeyConvertFcn>::
322 capacity() const {
323   size_t totalCap(0);
324   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
325   FOR_EACH_RANGE(i, 0, numMaps) {
326     totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
327   }
328   return totalCap;
329 }
330
331 // spaceRemaining --
332 // number of new insertions until current submaps are all at max load
333 template <typename KeyT,
334           typename ValueT,
335           typename HashFcn,
336           typename EqualFcn,
337           typename Allocator,
338           typename ProbeFcn,
339           typename KeyConvertFcn>
340 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
341                      Allocator, ProbeFcn, KeyConvertFcn>::
342 spaceRemaining() const {
343   size_t spaceRem(0);
344   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
345   FOR_EACH_RANGE(i, 0, numMaps) {
346     SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
347     spaceRem += std::max(
348       0,
349       thisMap->maxEntries_ - &thisMap->numEntries_.readFull()
350     );
351   }
352   return spaceRem;
353 }
354
355 // clear -- Wipes all keys and values from primary map and destroys
356 // all secondary maps.  Not thread safe.
357 template <typename KeyT,
358           typename ValueT,
359           typename HashFcn,
360           typename EqualFcn,
361           typename Allocator,
362           typename ProbeFcn,
363           typename KeyConvertFcn>
364 void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
365                    Allocator, ProbeFcn, KeyConvertFcn>::
366 clear() {
367   subMaps_[0].load(std::memory_order_relaxed)->clear();
368   int const numMaps = numMapsAllocated_
369     .load(std::memory_order_relaxed);
370   FOR_EACH_RANGE(i, 1, numMaps) {
371     SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
372     DCHECK(thisMap);
373     SubMap::destroy(thisMap);
374     subMaps_[i].store(nullptr, std::memory_order_relaxed);
375   }
376   numMapsAllocated_.store(1, std::memory_order_relaxed);
377 }
378
379 // size --
380 template <typename KeyT,
381           typename ValueT,
382           typename HashFcn,
383           typename EqualFcn,
384           typename Allocator,
385           typename ProbeFcn,
386           typename KeyConvertFcn>
387 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
388                      Allocator, ProbeFcn, KeyConvertFcn>::
389 size() const {
390   size_t totalSize(0);
391   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
392   FOR_EACH_RANGE(i, 0, numMaps) {
393     totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
394   }
395   return totalSize;
396 }
397
398 // encodeIndex -- Encode the submap index and offset into return.
399 // index_ret must be pre-populated with the submap offset.
400 //
401 // We leave index_ret untouched when referring to the primary map
402 // so it can be as large as possible (31 data bits).  Max size of
403 // secondary maps is limited by what can fit in the low 27 bits.
404 //
405 // Returns the following bit-encoded data in index_ret:
406 //   if subMap == 0 (primary map) =>
407 //     bit(s)          value
408 //         31              0
409 //       0-30  submap offset (index_ret input)
410 //
411 //   if subMap > 0 (secondary maps) =>
412 //     bit(s)          value
413 //         31              1
414 //      27-30   which subMap
415 //       0-26  subMap offset (index_ret input)
416 template <typename KeyT,
417           typename ValueT,
418           typename HashFcn,
419           typename EqualFcn,
420           typename Allocator,
421           typename ProbeFcn,
422           typename KeyConvertFcn>
423 inline uint32_t
424 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
425               Allocator, ProbeFcn, KeyConvertFcn>::
426     encodeIndex(uint32_t subMap, uint32_t offset) {
427   DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
428   if (subMap == 0) return offset;
429   // Make sure subMap isn't too big
430   DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
431   // Make sure subMap bits of offset are clear
432   DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
433
434   // Set high-order bits to encode which submap this index belongs to
435   return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
436 }
437
438
// Iterator implementation

// Forward iterator over the whole map, built on boost::iterator_facade.
// It holds {owning map, current sub map index, sub-map iterator} and
// walks each sub map's elements in order, hopping to the next sub map
// when the current one is exhausted.  ahm_ == nullptr is the shared
// "end" sentinel for both iterator and const_iterator.
template <typename KeyT,
          typename ValueT,
          typename HashFcn,
          typename EqualFcn,
          typename Allocator,
          typename ProbeFcn,
          typename KeyConvertFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
                     Allocator, ProbeFcn, KeyConvertFcn>::
    ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
                                          IterVal,
                                          boost::forward_traversal_tag> {
  // Default construction yields the end iterator (null map pointer).
  explicit ahm_iterator() : ahm_(0) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator.  The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template<class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
               typename std::enable_if<
               std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
      : ahm_(o.ahm_)
      , subMap_(o.subMap_)
      , subIt_(o.subIt_)
  {}

  /*
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  // Built by AtomicHashMap::find()/emplace() to point at a known slot.
  explicit ahm_iterator(ContT* ahm,
                        uint32_t subMap,
                        const SubIt& subIt)
      : ahm_(ahm)
      , subMap_(subMap)
      , subIt_(subIt)
  {}

  friend class boost::iterator_core_access;

  // Advances the sub-map iterator, then hops to the next sub map (or
  // becomes end) if the current one is exhausted.
  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    checkAdvanceToNextSubmap();
  }

  // Two iterators are equal when both are end, or when they point into
  // the same map, same sub map, at the same sub-iterator position.
  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      return false;
    }

    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }

    return subMap_ == other.subMap_ &&
      subIt_ == other.subIt_;
  }

  IterVal& dereference() const {
    return *subIt_;
  }

  // End sentinel: a null map pointer marks the past-the-end iterator.
  bool isEnd() const { return ahm_ == nullptr; }

  // If subIt_ sits at the end of its sub map, advance through later sub
  // maps (skipping any that are empty) until an element is found or the
  // last published sub map is exhausted, in which case this becomes end.
  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }

    SubMap* thisMap = ahm_->subMaps_[subMap_].
      load(std::memory_order_relaxed);
    while (subIt_ == thisMap->end()) {
      // This sub iterator is done, advance to next one
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();
      } else {
        // No more sub maps: collapse to the end sentinel.
        ahm_ = nullptr;
        return;
      }
    }
  }

 private:
  ContT* ahm_;       // owning map, or nullptr for the end iterator
  uint32_t subMap_;  // index of the sub map subIt_ iterates over
  SubIt subIt_;      // position within that sub map
}; // ahm_iterator
540
541 } // namespace folly