X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2FAtomicUnorderedMap.h;h=77b2eb303a16e9122af3a1fcc26326af7824cfdc;hp=75ab6f5571845e8df6752df3c2532e98bf99ed7d;hb=a5562cb52be583e617374b7ef66c5560946dc11b;hpb=ed8c80a0e0988e4ce687f51ca832a00e4a6b7930

diff --git a/folly/AtomicUnorderedMap.h b/folly/AtomicUnorderedMap.h
index 75ab6f55..77b2eb30 100644
--- a/folly/AtomicUnorderedMap.h
+++ b/folly/AtomicUnorderedMap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2017 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,23 +17,23 @@
 #pragma once
 
 #include
+#include
 #include
+#include
 #include
 #include
 #include
-#include
-#include
+#include
+
 #include
 #include
 #include
 #include
+#include
 #include
 #include
-#include
-#include
-
 namespace folly {
@@ -129,16 +129,17 @@ namespace folly {
 /// which is much faster than destructing all of the keys and values.
 /// Feel free to override if std::is_trivial_destructor isn't recognizing
 /// the triviality of your destructors.
-template <typename Key,
-          typename Value,
-          typename Hash = std::hash<Key>,
-          typename KeyEqual = std::equal_to<Key>,
-          bool SkipKeyValueDeletion =
-              (boost::has_trivial_destructor<Key>::value &&
-               boost::has_trivial_destructor<Value>::value),
-          template <typename> class Atom = std::atomic,
-          typename IndexType = uint32_t,
-          typename Allocator = folly::detail::MMapAlloc>
+template <
+    typename Key,
+    typename Value,
+    typename Hash = std::hash<Key>,
+    typename KeyEqual = std::equal_to<Key>,
+    bool SkipKeyValueDeletion =
+        (boost::has_trivial_destructor<Key>::value &&
+         boost::has_trivial_destructor<Value>::value),
+    template <typename> class Atom = std::atomic,
+    typename IndexType = uint32_t,
+    typename Allocator = folly::detail::MMapAlloc>
 struct AtomicUnorderedInsertMap {
@@ -212,7 +213,7 @@ struct AtomicUnorderedInsertMap {
       const Allocator& alloc = Allocator())
       : allocator_(alloc)
   {
-    size_t capacity = maxSize / std::min(1.0f, maxLoadFactor) + 128;
+    size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128);
     size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
     if (capacity > avail && maxSize < avail) {
       // we'll do our best
@@ -262,7 +263,7 @@ struct AtomicUnorderedInsertMap {
   ///   auto value = memo.findOrConstruct(key, [=](void* raw) {
   ///     new (raw) std::string(computation(key));
   ///   })->first;
-  template
+  template
   std::pair<const_iterator, bool> findOrConstruct(const Key& key, Func&& func) {
     auto const slot = keyToSlotIdx(key);
     auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
@@ -314,7 +315,7 @@ struct AtomicUnorderedInsertMap {
   /// Eventually we can duplicate all of the std::pair constructor
   /// forms, including a recursive tuple forwarding template
   /// http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
-  template
+  template
   std::pair<const_iterator, bool> emplace(const K& key, V&& value) {
     return findOrConstruct(key, [&](void* raw) {
       new (raw) Value(std::forward<V>(value));
@@ -338,8 +339,7 @@ struct AtomicUnorderedInsertMap {
   }
 
  private:
-
-  enum {
+  enum : IndexType {
     kMaxAllocationTries = 1000, // after this we throw
   };
@@ -437,7 +437,7 @@ struct AtomicUnorderedInsertMap {
   /// Allocates a slot and returns its index. Tries to put it near
   /// slots_[start].
   IndexType allocateNear(IndexType start) {
-    for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
+    for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) {
       auto slot = allocationAttempt(start, tries);
       auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
       if ((prev & 3) == EMPTY &&
@@ -454,13 +454,13 @@ struct AtomicUnorderedInsertMap {
   /// can specialize it differently during deterministic testing
   IndexType allocationAttempt(IndexType start, IndexType tries) const {
     if (LIKELY(tries < 8 && start + tries < numSlots_)) {
-      return start + tries;
+      return IndexType(start + tries);
     } else {
       IndexType rv;
       if (sizeof(IndexType) <= 4) {
-        rv = folly::Random::rand32(numSlots_);
+        rv = IndexType(folly::Random::rand32(numSlots_));
       } else {
-        rv = folly::Random::rand64(numSlots_);
+        rv = IndexType(folly::Random::rand64(numSlots_));
       }
       assert(rv < numSlots_);
       return rv;
@@ -479,15 +479,16 @@ struct AtomicUnorderedInsertMap {
 /// to select a 64 bit slot index type. Use this if you need a capacity
 /// bigger than 2^30 (about a billion). This increases memory overheads,
 /// obviously.
-template <typename Key,
-          typename Value,
-          typename Hash = std::hash<Key>,
-          typename KeyEqual = std::equal_to<Key>,
-          bool SkipKeyValueDeletion =
-              (boost::has_trivial_destructor<Key>::value &&
-               boost::has_trivial_destructor<Value>::value),
-          template <typename> class Atom = std::atomic,
-          typename Allocator = folly::detail::MMapAlloc>
+template <
+    typename Key,
+    typename Value,
+    typename Hash = std::hash<Key>,
+    typename KeyEqual = std::equal_to<Key>,
+    bool SkipKeyValueDeletion =
+        (boost::has_trivial_destructor<Key>::value &&
+         boost::has_trivial_destructor<Value>::value),
+    template <typename> class Atom = std::atomic,
+    typename Allocator = folly::detail::MMapAlloc>
 using AtomicUnorderedInsertMap64 = AtomicUnorderedInsertMap<

 /// MutableAtom<V>>. This relies on AtomicUnorderedInsertMap's guarantee
 /// that it doesn't move values.
-template class Atom = std::atomic>
+template class Atom = std::atomic>
 struct MutableAtom {
   mutable Atom<T> data;
@@ -519,5 +519,4 @@ struct MutableData {
   explicit MutableData(const T& init) : data(init) {}
 };
 
-
-}
+} // namespace folly
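
Usage sketch (not part of the patch above): the hunks touch the insert-only API of AtomicUnorderedInsertMap, in particular findOrConstruct() and emplace(), and the constructor that fixes capacity up front from maxSize and a load factor. The snippet below is a minimal illustration of how that API is typically called, assuming folly is on the include path; the names memo, getOrCompute, and computation() are hypothetical and not taken from this commit.

#include <string>

#include <folly/AtomicUnorderedMap.h>

// Hypothetical caller-provided function, defined elsewhere.
std::string computation(int key);

// Capacity is chosen at construction time (maxSize plus a load-factor
// cushion, as computed in the constructor shown in the diff); the map is
// insert-only, so nothing is ever erased or rehashed.
folly::AtomicUnorderedInsertMap<int, std::string> memo(10000);

const std::string& getOrCompute(int key) {
  // The lambda placement-news the Value into raw storage provided by the
  // map; if the key is already present the existing element is returned
  // and no new value is constructed for it.
  auto result = memo.findOrConstruct(key, [&](void* raw) {
    new (raw) std::string(computation(key));
  });
  // result.first is a const_iterator to the (key, value) pair and
  // result.second reports whether this call performed the insertion.
  // Values never move once inserted, so returning a reference is safe.
  return result.first->second;
}

emplace(key, value), visible in the diff, is a thin convenience wrapper that forwards into the same findOrConstruct() path, and AtomicUnorderedInsertMap64 is the alias to reach for when capacity needs to exceed roughly 2^30 entries.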