X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2FAtomicHashMap-inl.h;h=a732fd9676497f7bf72a0cbcd22276d422074bf6;hp=afac177fac801100f179a01e90bf7e9a9214739e;hb=218a45f5830c9245b8ccfab3c389b56bf74db730;hpb=22afce906d7e98d95f8c45c3301072d9fd891d41

diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index afac177f..a732fd96 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2016 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,73 +18,93 @@
 #error "This should only be included by AtomicHashMap.h"
 #endif
 
-#include "folly/detail/AtomicHashUtils.h"
+#include <folly/detail/AtomicHashUtils.h>
 
 namespace folly {
 
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::Config
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
-
 // AtomicHashMap constructor -- Atomic wrapper that allows growth
 // This class has a lot of overhead (184 Bytes) so only use for big maps
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-AtomicHashMap(size_t size, const Config& config)
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+AtomicHashMap(size_t finalSizeEst, const Config& config)
   : kGrowthFrac_(config.growthFactor < 0 ?
                  1.0 - config.maxLoadFactor : config.growthFactor) {
   CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
-  subMaps_[0].store(SubMap::create(size, config).release(),
+  subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
     std::memory_order_relaxed);
-  auto numSubMaps = kNumSubMaps_;
-  FOR_EACH_RANGE(i, 1, numSubMaps) {
+  auto subMapCount = kNumSubMaps_;
+  FOR_EACH_RANGE(i, 1, subMapCount) {
     subMaps_[i].store(nullptr, std::memory_order_relaxed);
   }
   numMapsAllocated_.store(1, std::memory_order_relaxed);
 }
 
-// insert --
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
-                                 EqualFcn, Allocator>::iterator, bool>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-insert(key_type k, const mapped_type& v) {
-  SimpleRetT ret = insertInternal(k,v);
-  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
-  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
-                        ret.success);
-}
-
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
-                                 EqualFcn, Allocator>::iterator, bool>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-insert(key_type k, mapped_type&& v) {
-  SimpleRetT ret = insertInternal(k, std::move(v));
+// emplace --
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn,
+          typename LookupKeyToKeyFcn,
+          typename... ArgTs>
+std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
+                                 ProbeFcn, KeyConvertFcn>::iterator, bool>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
+  SimpleRetT ret = insertInternal<LookupKeyT, LookupHashFcn,
+                                  LookupEqualFcn, LookupKeyToKeyFcn>(
+      k, std::forward<ArgTs>(vCtorArgs)...);
   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
   return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                         ret.success);
 }
 
 // insertInternal -- Allocates new sub maps as existing ones fill up.
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-template <typename T>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-insertInternal(key_type key, T&& value) {
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn,
+          typename LookupKeyToKeyFcn,
+          typename... ArgTs>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
 beginInsertInternal:
-  int nextMapIdx = // this maintains our state
+  auto nextMapIdx = // this maintains our state
     numMapsAllocated_.load(std::memory_order_acquire);
   typename SubMap::SimpleRetT ret;
   FOR_EACH_RANGE(i, 0, nextMapIdx) {
     // insert in each map successively.  If one succeeds, we're done!
     SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
-    ret = subMap->insertInternal(key, std::forward<T>(value));
+    ret = subMap->template insertInternal<LookupKeyT, LookupHashFcn,
+                                          LookupEqualFcn, LookupKeyToKeyFcn>(
+        key, std::forward<ArgTs>(vCtorArgs)...);
     if (ret.idx == subMap->capacity_) {
       continue;  //map is full, so try the next one
     }
@@ -130,16 +150,16 @@ insertInternal(key_type key, T&& value) {
   } else {
     // If we lost the race, we'll have to wait for the next map to get
     // allocated before doing any insertion here.
-    FOLLY_SPIN_WAIT(
-      nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire)
-    );
+    detail::atomic_hash_spin_wait([&] {
+      return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
+    });
   }
 
   // Relaxed is ok here because either we just created this map, or we
   // just did a spin wait with an acquire load on numMapsAllocated_.
   SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
   DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
-  ret = loadedMap->insertInternal(key, std::forward<T>(value));
+  ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
   if (ret.idx != loadedMap->capacity_) {
     return SimpleRetT(nextMapIdx, ret.idx, ret.success);
   }
@@ -149,12 +169,21 @@ insertInternal(key_type key, T&& value) {
 }
 
 // find --
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-find(KeyT k) {
-  SimpleRetT ret = findInternal(k);
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::find(
+    LookupKeyT k) {
+  SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
   if (!ret.success) {
     return end();
   }
@@ -162,23 +191,44 @@ find(KeyT k) {
   return iterator(this, ret.i, subMap->makeIter(ret.j));
 }
 
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn>
 typename AtomicHashMap<KeyT, ValueT,
-                       HashFcn, EqualFcn, Allocator>::const_iterator
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-find(KeyT k) const {
-  return const_cast<AtomicHashMap*>(this)->find(k);
+                       HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+find(LookupKeyT k) const {
+  return const_cast<AtomicHashMap*>(this)
+      ->find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
 }
 
 // findInternal --
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-findInternal(const KeyT k) const {
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+    findInternal(const LookupKeyT k) const {
   SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
-  typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
+  typename SubMap::SimpleRetT ret =
+      primaryMap->template findInternal<LookupKeyT,
+                                        LookupHashFcn,
+                                        LookupEqualFcn>(k);
   if (LIKELY(ret.idx != primaryMap->capacity_)) {
     return SimpleRetT(0, ret.idx, ret.success);
   }
@@ -186,7 +236,9 @@ findInternal(const KeyT k) const {
   FOR_EACH_RANGE(i, 1, numMaps) {
     // Check each map successively.  If one succeeds, we're done!
     SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
-    ret = thisMap->findInternal(k);
+    ret = thisMap->template findInternal<LookupKeyT,
+                                         LookupHashFcn,
+                                         LookupEqualFcn>(k);
     if (LIKELY(ret.idx != thisMap->capacity_)) {
       return SimpleRetT(i, ret.idx, ret.success);
     }
@@ -196,10 +248,18 @@ findInternal(const KeyT k) const {
 }
 
 // findAtInternal -- see encodeIndex() for details.
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
 findAtInternal(uint32_t idx) const {
   uint32_t subMapIdx, subMapOffset;
   if (idx & kSecondaryMapBit_) {
@@ -217,10 +277,18 @@ findAtInternal(uint32_t idx) const {
 }
 
 // erase --
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    size_type
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
 erase(const KeyT k) {
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
   FOR_EACH_RANGE(i, 0, numMaps) {
@@ -234,9 +302,15 @@ erase(const KeyT k) {
 }
 
 // capacity -- summation of capacities of all submaps
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
 capacity() const {
   size_t totalCap(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -248,9 +322,15 @@ capacity() const {
 
 // spaceRemaining --
 // number of new insertions until current submaps are all at max load
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
 spaceRemaining() const {
   size_t spaceRem(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -266,9 +346,15 @@ spaceRemaining() const {
 
 // clear -- Wipes all keys and values from primary map and destroys
 // all secondary maps.  Not thread safe.
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                   Allocator, ProbeFcn, KeyConvertFcn>::
 clear() {
   subMaps_[0].load(std::memory_order_relaxed)->clear();
   int const numMaps = numMapsAllocated_
@@ -283,9 +369,15 @@ clear() {
 }
 
 // size --
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
 size() const {
   size_t totalSize(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -313,10 +405,17 @@ size() const {
 //   31              1
 //   27-30           which subMap
 //   0-26            subMap offset (index_ret input)
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-encodeIndex(uint32_t subMap, uint32_t offset) {
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+inline uint32_t
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+    encodeIndex(uint32_t subMap, uint32_t offset) {
   DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
   if (subMap == 0) return offset;
   // Make sure subMap isn't too big
@@ -331,14 +430,19 @@ encodeIndex(uint32_t subMap, uint32_t offset) {
 
 // Iterator implementation
 
-template <class KeyT, class ValueT,
-          class HashFcn, class EqualFcn, class Allocator>
-template <class ContT, class IterVal, class SubIt>
-struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
-    : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
-                             IterVal,
-                             boost::forward_traversal_tag>
-{
+template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
+          typename Allocator, typename ProbeFcn, typename KeyConvertFcn>
+template <class ContT, class IterVal, class SubIt>
+struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
+    ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
+                                          IterVal,
+                                          boost::forward_traversal_tag> {
   explicit ahm_iterator() : ahm_(0) {}
 
   // Conversion ctor for interoperability between const_iterator and
@@ -370,9 +474,7 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
     : ahm_(ahm)
     , subMap_(subMap)
     , subIt_(subIt)
-  {
-    checkAdvanceToNextSubmap();
-  }
+  {}
 
   friend class boost::iterator_core_access;
 
@@ -408,7 +510,7 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
     SubMap* thisMap = ahm_->subMaps_[subMap_].
       load(std::memory_order_relaxed);
-    if (subIt_ == thisMap->end()) {
+    while (subIt_ == thisMap->end()) {
       // This sub iterator is done, advance to next one
       if (subMap_ + 1 <
           ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
@@ -417,6 +519,7 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
         subIt_ = thisMap->begin();
       } else {
         ahm_ = nullptr;
+        return;
       }
     }
   }
@@ -428,5 +531,3 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
 };  // ahm_iterator
 
 }  // namespace folly
-
-#undef FOLLY_SPIN_WAIT
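For reviewers who want to exercise the new API surface: a minimal usage sketch (not part of the change) against the emplace()/find()/erase() entry points touched above. It assumes the default hash, equality, and allocator traits; the map size and keys here are arbitrary. Note that AtomicHashMap reserves a few key values as empty/locked/erased sentinels (configurable via Config), so real keys must avoid them; small non-negative keys are safe with the defaults.

#include <folly/AtomicHashMap.h>
#include <cstdint>
#include <cstdio>

int main() {
  // Pass the expected final size up front: growth happens by chaining
  // secondary submaps (see kGrowthFrac_), which is slower than sizing well.
  folly::AtomicHashMap<int64_t, int64_t> ahm(4096);

  // emplace() constructs the value in place from vCtorArgs...
  auto ret = ahm.emplace(10, 100);
  std::printf("inserted: %d\n", (int)ret.second);  // true on first insert

  ret = ahm.emplace(10, 999);                      // key already present
  if (!ret.second) {
    ret.first->second = 999;                       // values stay mutable
  }

  auto it = ahm.find(10);
  if (it != ahm.end()) {
    std::printf("key 10 -> %lld\n", (long long)it->second);
  }

  std::printf("erased: %zu\n", ahm.erase(10));     // 1 if the key existed
  return 0;
}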
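One behavioral note on the insertInternal() hunks: the FOLLY_SPIN_WAIT macro (and its matching #undef at the bottom of the file) is gone, replaced by detail::atomic_hash_spin_wait from the new <folly/detail/AtomicHashUtils.h> include, which takes the wait condition as a callable, keeping it type-checked and properly scoped. The general shape of such a helper, sketched standalone with an invented back-off budget (folly's actual thresholds and pause strategy may differ):

#include <cstddef>
#include <thread>

// Spin while condition() stays true; busy-wait briefly, then yield the
// CPU so a long wait does not burn a whole core.
template <typename Cond>
void spin_wait_sketch(Cond condition) {
  for (std::size_t spins = 0; condition(); ++spins) {
    if (spins >= 10000) {  // invented threshold
      std::this_thread::yield();
    }
    // below the threshold, a CPU pause/relax hint would ideally go here
  }
}

The call site in the diff wraps its condition in a lambda:
detail::atomic_hash_spin_wait([&] { return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire); });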
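The bit layout documented above encodeIndex() (bit 31 flags a secondary submap, bits 27-30 pick which of the submaps, bits 0-26 hold the offset inside it) is what lets a single uint32_t address any slot across all submaps. A standalone round-trip sketch of that packing, with constants mirroring the kSecondaryMapBit_ and submap-shift values implied by the comment (names and exact checks here are illustrative, not folly's members):

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint32_t kSecondaryMapBit = 1u << 31;               // bit 31
constexpr uint32_t kSubMapIndexShift = 27;                    // bits 27-30
constexpr uint32_t kSubMapOffsetMask = (1u << kSubMapIndexShift) - 1;

uint32_t encodeIndex(uint32_t subMap, uint32_t offset) {
  if (subMap == 0) return offset;       // primary map: offset used as-is
  assert(subMap < 16);                  // only four bits for the submap
  assert(offset <= kSubMapOffsetMask);  // offset must fit in 27 bits
  return offset | (subMap << kSubMapIndexShift) | kSecondaryMapBit;
}

void decodeIndex(uint32_t idx, uint32_t* subMap, uint32_t* offset) {
  if (idx & kSecondaryMapBit) {
    *subMap = (idx >> kSubMapIndexShift) & 0xF;  // bits 27-30
    *offset = idx & kSubMapOffsetMask;           // bits 0-26
  } else {
    *subMap = 0;                                 // primary map
    *offset = idx;
  }
}

int main() {
  uint32_t m = 0, o = 0;
  decodeIndex(encodeIndex(3, 42), &m, &o);
  std::printf("subMap=%u offset=%u\n", m, o);  // prints subMap=3 offset=42
  return 0;
}

This mirrors how findAtInternal() above splits an index back into (subMapIdx, subMapOffset).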