diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index 4752cdd3..a732fd96 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2016 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,16 +22,17 @@
 
 namespace folly {
 
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::Config
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
-
 // AtomicHashMap constructor -- Atomic wrapper that allows growth
 // This class has a lot of overhead (184 Bytes) so only use for big maps
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
 AtomicHashMap(size_t finalSizeEst, const Config& config)
   : kGrowthFrac_(config.growthFactor < 0 ?
                  1.0 - config.maxLoadFactor : config.growthFactor) {
@@ -45,38 +46,53 @@ AtomicHashMap(size_t finalSizeEst, const Config& config)
   numMapsAllocated_.store(1, std::memory_order_relaxed);
 }
 
-// insert --
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
-                                 EqualFcn, Allocator>::iterator, bool>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-insert(key_type k, const mapped_type& v) {
-  SimpleRetT ret = insertInternal(k,v);
-  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
-  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
-                        ret.success);
-}
-
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
-                                 EqualFcn, Allocator>::iterator, bool>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-insert(key_type k, mapped_type&& v) {
-  SimpleRetT ret = insertInternal(k, std::move(v));
+// emplace --
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn,
+          typename LookupKeyToKeyFcn,
+          typename... ArgTs>
+std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
+                                 ProbeFcn, KeyConvertFcn>::iterator, bool>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
+  SimpleRetT ret = insertInternal<LookupKeyT,
+                                  LookupHashFcn,
+                                  LookupEqualFcn,
+                                  LookupKeyToKeyFcn>(
+      k, std::forward<ArgTs>(vCtorArgs)...);
   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
   return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                         ret.success);
 }
 
 // insertInternal -- Allocates new sub maps as existing ones fill up.
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-template <typename T>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-insertInternal(key_type key, T&& value) {
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+template <typename LookupKeyT,
+          typename LookupHashFcn,
+          typename LookupEqualFcn,
+          typename LookupKeyToKeyFcn,
+          typename... ArgTs>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
  beginInsertInternal:
   auto nextMapIdx = // this maintains our state
     numMapsAllocated_.load(std::memory_order_acquire);
@@ -84,7 +100,11 @@ insertInternal(key_type key, T&& value) {
   FOR_EACH_RANGE(i, 0, nextMapIdx) {
     // insert in each map successively.  If one succeeds, we're done!
     SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
-    ret = subMap->insertInternal(key, std::forward<T>(value));
+    ret = subMap->template insertInternal<LookupKeyT,
+                                          LookupHashFcn,
+                                          LookupEqualFcn,
+                                          LookupKeyToKeyFcn>(
+        key, std::forward<ArgTs>(vCtorArgs)...);
     if (ret.idx == subMap->capacity_) {
       continue;  //map is full, so try the next one
     }
@@ -130,16 +150,16 @@ insertInternal(key_type key, T&& value) {
   } else {
     // If we lost the race, we'll have to wait for the next map to get
     // allocated before doing any insertion here.
-    FOLLY_SPIN_WAIT(
-      nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire)
-    );
+    detail::atomic_hash_spin_wait([&] {
+      return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
+    });
   }
 
   // Relaxed is ok here because either we just created this map, or we
   // just did a spin wait with an acquire load on numMapsAllocated_.
   SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
   DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
-  ret = loadedMap->insertInternal(key, std::forward<T>(value));
+  ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
   if (ret.idx != loadedMap->capacity_) {
     return SimpleRetT(nextMapIdx, ret.idx, ret.success);
   }
@@ -149,12 +169,21 @@
 }
 
 // find --
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-find(KeyT k) {
-  SimpleRetT ret = findInternal(k);
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+template <typename LookupKeyT, typename LookupHashFcn, typename LookupEqualFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
+              ProbeFcn, KeyConvertFcn>::find(
+    LookupKeyT k) {
+  SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
   if (!ret.success) {
     return end();
   }
@@ -162,23 +191,44 @@ find(KeyT k) {
   return iterator(this, ret.i, subMap->makeIter(ret.j));
 }
 
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+template <typename LookupKeyT, typename LookupHashFcn, typename LookupEqualFcn>
 typename AtomicHashMap<KeyT, ValueT,
-                       HashFcn, EqualFcn, Allocator>::const_iterator
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-find(KeyT k) const {
-  return const_cast<AtomicHashMap*>(this)->find(k);
+                       HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+find(LookupKeyT k) const {
+  return const_cast<AtomicHashMap*>(this)
+      ->find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
 }
 
 // findInternal --
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-findInternal(const KeyT k) const {
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+template <typename LookupKeyT, typename LookupHashFcn, typename LookupEqualFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+  findInternal(const LookupKeyT k) const {
   SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
-  typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
+  typename SubMap::SimpleRetT ret =
+      primaryMap->template findInternal<LookupKeyT,
+                                        LookupHashFcn,
+                                        LookupEqualFcn>(k);
   if (LIKELY(ret.idx != primaryMap->capacity_)) {
     return SimpleRetT(0, ret.idx, ret.success);
   }
@@ -186,7 +236,9 @@ findInternal(const KeyT k) const {
   FOR_EACH_RANGE(i, 1, numMaps) {
     // Check each map successively.  If one succeeds, we're done!
     SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
-    ret = thisMap->findInternal(k);
+    ret = thisMap->template findInternal<LookupKeyT,
+                                         LookupHashFcn,
+                                         LookupEqualFcn>(k);
     if (LIKELY(ret.idx != thisMap->capacity_)) {
       return SimpleRetT(i, ret.idx, ret.success);
     }
@@ -196,10 +248,18 @@ findInternal(const KeyT k) const {
 }
 
 // findAtInternal -- see encodeIndex() for details.
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
 findAtInternal(uint32_t idx) const {
   uint32_t subMapIdx, subMapOffset;
   if (idx & kSecondaryMapBit_) {
@@ -217,10 +277,18 @@ findAtInternal(uint32_t idx) const {
 }
 
 // erase --
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                       Allocator, ProbeFcn, KeyConvertFcn>::
+    size_type
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
 erase(const KeyT k) {
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
   FOR_EACH_RANGE(i, 0, numMaps) {
@@ -234,9 +302,15 @@ erase(const KeyT k) {
 }
 
 // capacity -- summation of capacities of all submaps
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
 capacity() const {
   size_t totalCap(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -248,9 +322,15 @@ capacity() const {
 
 // spaceRemaining --
 // number of new insertions until current submaps are all at max load
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
 spaceRemaining() const {
   size_t spaceRem(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -266,9 +346,15 @@ spaceRemaining() const {
 
 // clear -- Wipes all keys and values from primary map and destroys
 // all secondary maps.  Not thread safe.
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                   Allocator, ProbeFcn, KeyConvertFcn>::
 clear() {
   subMaps_[0].load(std::memory_order_relaxed)->clear();
   int const numMaps = numMapsAllocated_
@@ -283,9 +369,15 @@ clear() {
 }
 
 // size --
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
 size() const {
   size_t totalSize(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -313,10 +405,17 @@ size() const {
 //      31              1
 //   27-30              which subMap
 //    0-26              subMap offset (index_ret input)
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
-encodeIndex(uint32_t subMap, uint32_t offset) {
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+inline uint32_t
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+              Allocator, ProbeFcn, KeyConvertFcn>::
+  encodeIndex(uint32_t subMap, uint32_t offset) {
   DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
   if (subMap == 0) return offset;
   // Make sure subMap isn't too big
@@ -331,14 +430,19 @@ encodeIndex(uint32_t subMap, uint32_t offset) {
 
 // Iterator implementation
 
-template <typename KeyT, typename ValueT,
-          typename HashFcn, typename EqualFcn, typename Allocator>
-template <class ContT, class IterVal, class SubIt>
-struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
-    : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
-                             IterVal,
-                             boost::forward_traversal_tag>
-{
+template <typename KeyT,
+          typename ValueT,
+          typename HashFcn,
+          typename EqualFcn,
+          typename Allocator,
+          typename ProbeFcn,
+          typename KeyConvertFcn>
+template <class ContT, class IterVal, class SubIt>
+struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
+                     Allocator, ProbeFcn, KeyConvertFcn>::
+    ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
+                                          IterVal,
+                                          boost::forward_traversal_tag> {
   explicit ahm_iterator() : ahm_(0) {}
 
   // Conversion ctor for interoperability between const_iterator and
@@ -427,5 +531,3 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
 };  // ahm_iterator
 
 }  // namespace folly
-
-#undef FOLLY_SPIN_WAIT
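
For reference, a minimal usage sketch of the API surface after this change (not part of the diff; assumes folly is built and on the include path). emplace() subsumes the old insert() overloads by forwarding its trailing arguments to the value constructor, and the new LookupKeyT/LookupHashFcn/LookupEqualFcn template parameters allow find() and emplace() to accept lookup keys of a type other than KeyT when matching functors are supplied:

    // Sketch only: exercises the new emplace()/find() shown in this diff.
    #include <folly/AtomicHashMap.h>

    #include <cstdint>
    #include <cstdio>

    int main() {
      // KeyT must be a 32- or 64-bit atomically-updatable type; the
      // constructor takes an estimate of the final map size.
      folly::AtomicHashMap<int64_t, int64_t> ahm(4096);

      // emplace() replaces insert(k, v): everything after the key is
      // forwarded to ValueT's constructor (here just an int64_t copy).
      auto ret = ahm.emplace(int64_t(1), int64_t(100));
      std::printf("inserted: %d\n", int(ret.second));  // pair<iterator, bool>

      // find() still returns end() when the key is absent.
      auto it = ahm.find(int64_t(1));
      if (it != ahm.end()) {
        std::printf("value: %lld\n", (long long)it->second);
      }
      return 0;
    }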