diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h
index bbb3cdd4..654077b1 100644
--- a/folly/AtomicHashArray.h
+++ b/folly/AtomicHashArray.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 
 /**
  * AtomicHashArray is the building block for AtomicHashMap. It provides the
- * core lock-free functionality, but is limitted by the fact that it cannot
- * grow past it's initialization size and is a little more awkward (no public
+ * core lock-free functionality, but is limited by the fact that it cannot
+ * grow past its initialization size and is a little more awkward (no public
  * constructor, for example). If you're confident that you won't run out of
  * space, don't mind the awkwardness, and really need bare-metal performance,
  * feel free to use AHA directly.
@@ -29,7 +29,7 @@
  * @author Jordan DeLong
  */
 
-#ifndef FOLLY_ATOMICHASHARRAY_H_
+#pragma once
 #define FOLLY_ATOMICHASHARRAY_H_
 
 #include
@@ -37,21 +37,69 @@
 #include
 #include
 
-#include
 #include
+#include
+#include
 
 namespace folly {
 
-template <class KeyT, class ValueT, class HashFcn = std::hash<KeyT>,
-          class EqualFcn = std::equal_to<KeyT>,
-          class Allocator = std::allocator<char>>
+struct AtomicHashArrayLinearProbeFcn
+{
+  inline size_t operator()(size_t idx,
+                           size_t /* numProbes */,
+                           size_t capacity) const {
+    idx += 1; // linear probing
+
+    // Avoid modulus because it's slow
+    return LIKELY(idx < capacity) ? idx : (idx - capacity);
+  }
+};
+
+struct AtomicHashArrayQuadraticProbeFcn
+{
+  inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const {
+    idx += numProbes; // quadratic probing
+
+    // Avoid modulus because it's slow
+    return LIKELY(idx < capacity) ? idx : (idx - capacity);
+  }
+};
+
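The two probe functors above are selected through the ProbeFcn template parameter introduced below. A minimal sketch of how such a functor is typically driven (hypothetical driver code, not part of this header; it assumes only that <folly/AtomicHashArray.h> is on the include path): the caller starts at an anchor slot and feeds each returned index back in, so the quadratic functor visits offsets 1, 3, 6, 10, ... from the anchor and wraps with a single subtraction instead of a modulus.

    #include <cstddef>
    #include <cstdio>
    #include <folly/AtomicHashArray.h> // defines the probe functors shown above

    int main() {
      folly::AtomicHashArrayQuadraticProbeFcn probe;
      const size_t capacity = 8;
      size_t idx = 5; // hypothetical anchor slot, as keyToAnchorIdx() would produce
      for (size_t numProbes = 1; numProbes <= 4; ++numProbes) {
        idx = probe(idx, numProbes, capacity);
        std::printf("probe %zu -> slot %zu\n", numProbes, idx);
      }
      // Visits slots 6, 0, 3, 7: offsets 1, 3, 6 and 10 from the anchor.
      return 0;
    }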
+// Enables specializing checkLegalKey without specializing its class.
+namespace detail {
+template <typename NotKeyT, typename KeyT>
+inline void checkLegalKeyIfKeyTImpl(NotKeyT /* ignored */,
+                                    KeyT /* emptyKey */,
+                                    KeyT /* lockedKey */,
+                                    KeyT /* erasedKey */) {}
+
+template <typename KeyT>
+inline void checkLegalKeyIfKeyTImpl(KeyT key_in, KeyT emptyKey,
+                                    KeyT lockedKey, KeyT erasedKey) {
+  DCHECK_NE(key_in, emptyKey);
+  DCHECK_NE(key_in, lockedKey);
+  DCHECK_NE(key_in, erasedKey);
+}
+} // namespace detail
+
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn = std::hash<KeyT>,
+    class EqualFcn = std::equal_to<KeyT>,
+    class Allocator = std::allocator<char>,
+    class ProbeFcn = AtomicHashArrayLinearProbeFcn,
+    class KeyConvertFcn = Identity>
 class AtomicHashMap;
 
-template <class KeyT, class ValueT, class HashFcn = std::hash<KeyT>,
-          class EqualFcn = std::equal_to<KeyT>,
-          class Allocator = std::allocator<char>>
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn = std::hash<KeyT>,
+    class EqualFcn = std::equal_to<KeyT>,
+    class Allocator = std::allocator<char>,
+    class ProbeFcn = AtomicHashArrayLinearProbeFcn,
+    class KeyConvertFcn = Identity>
 class AtomicHashArray : boost::noncopyable {
   static_assert((std::is_convertible<KeyT, int32_t>::value ||
                  std::is_convertible<KeyT, int64_t>::value ||
@@ -62,6 +110,9 @@ class AtomicHashArray : boost::noncopyable {
  public:
   typedef KeyT key_type;
   typedef ValueT mapped_type;
+  typedef HashFcn hasher;
+  typedef EqualFcn key_equal;
+  typedef KeyConvertFcn key_convert;
   typedef std::pair<const KeyT, ValueT> value_type;
   typedef std::size_t size_type;
   typedef std::ptrdiff_t difference_type;
@@ -76,7 +127,7 @@ class AtomicHashArray : boost::noncopyable {
   const KeyT kLockedKey_;
   const KeyT kErasedKey_;
 
-  template<class ContT, class IterVal>
+  template <class ContT, class IterVal>
   struct aha_iterator;
 
   typedef aha_iterator<const AtomicHashArray, const value_type> const_iterator;
@@ -120,31 +171,60 @@ class AtomicHashArray : boost::noncopyable {
    * deleter to make sure everything is cleaned up properly.
    */
   struct Config {
-    KeyT   emptyKey;
-    KeyT   lockedKey;
-    KeyT   erasedKey;
+    KeyT emptyKey;
+    KeyT lockedKey;
+    KeyT erasedKey;
     double maxLoadFactor;
     double growthFactor;
-    int entryCountThreadCacheSize;
+    uint32_t entryCountThreadCacheSize;
     size_t capacity; // if positive, overrides maxLoadFactor
 
-    constexpr Config() : emptyKey((KeyT)-1),
-                         lockedKey((KeyT)-2),
-                         erasedKey((KeyT)-3),
-                         maxLoadFactor(0.8),
-                         growthFactor(-1),
-                         entryCountThreadCacheSize(1000),
-                         capacity(0) {}
+    // Cannot have constexpr ctor because some compilers rightly complain.
+    Config() : emptyKey((KeyT)-1),
+               lockedKey((KeyT)-2),
+               erasedKey((KeyT)-3),
+               maxLoadFactor(0.8),
+               growthFactor(-1),
+               entryCountThreadCacheSize(1000),
+               capacity(0) {}
   };
 
-  static const Config defaultConfig;
-  static SmartPtr create(size_t maxSize, const Config& = defaultConfig);
+  // Cannot have pre-instantiated const Config instance because of SIOF.
+  static SmartPtr create(size_t maxSize, const Config& c = Config());
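A minimal usage sketch of the Config/create() pair (illustrative only; it assumes nothing beyond <folly/AtomicHashArray.h> as included above). Config carries the sentinel keys and sizing knobs, and create() hands back the SmartPtr whose deleter cleans the array up again:

    #include <utility>
    #include <folly/AtomicHashArray.h>

    using AHA = folly::AtomicHashArray<int32_t, int32_t>;

    int configDemo() {
      AHA::Config config;         // defaults: emptyKey = -1, lockedKey = -2, erasedKey = -3
      config.maxLoadFactor = 0.5; // roomier than the 0.8 default; an AHA can never grow
      AHA::SmartPtr map = AHA::create(4096, config); // unique_ptr with AHA's own deleter
      map->insert(std::make_pair(10, 20));
      return map->find(10)->second; // 20; keys -1, -2 and -3 are reserved as sentinels
    }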
-  iterator find(KeyT k) {
-    return iterator(this, findInternal(k).idx);
+  /*
+   * find --
+   *
+   *
+   *   Returns the iterator to the element if found, otherwise end().
+   *
+   *   As an optional feature, the type of the key to look up (LookupKeyT) is
+   *   allowed to be different from the type of keys actually stored (KeyT).
+   *
+   *   This enables use cases where materializing the key is costly and usually
+   *   redundant, e.g., canonicalizing/interning a set of strings and being able
+   *   to look up by StringPiece. To use this feature, LookupHashFcn must take
+   *   a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
+   *   and second parameter, respectively.
+   *
+   *   See folly/test/AtomicHashArrayTest.cpp for sample usage.
+   */
+  template <
+      typename LookupKeyT = key_type,
+      typename LookupHashFcn = hasher,
+      typename LookupEqualFcn = key_equal>
+  iterator find(LookupKeyT k) {
+    return iterator(this,
+        findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
   }
-  const_iterator find(KeyT k) const {
-    return const_cast<AtomicHashArray*>(this)->find(k);
+
+  template <
+      typename LookupKeyT = key_type,
+      typename LookupHashFcn = hasher,
+      typename LookupEqualFcn = key_equal>
+  const_iterator find(LookupKeyT k) const {
+    return const_cast<AtomicHashArray*>(this)->
+        find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
   }
 
   /*
@@ -159,11 +239,36 @@ class AtomicHashArray : boost::noncopyable {
    * iterator is set to the existing entry.
    */
   std::pair<iterator, bool> insert(const value_type& r) {
-    SimpleRetT ret = insertInternal(r.first, r.second);
-    return std::make_pair(iterator(this, ret.idx), ret.success);
+    return emplace(r.first, r.second);
   }
   std::pair<iterator, bool> insert(value_type&& r) {
-    SimpleRetT ret = insertInternal(r.first, std::move(r.second));
+    return emplace(r.first, std::move(r.second));
+  }
+
+  /*
+   * emplace --
+   *
+   *   Same contract as insert(), but performs in-place construction
+   *   of the value type using the specified arguments.
+   *
+   *   Also, like find(), this method optionally allows 'key_in' to have a type
+   *   different from that stored in the table; see find(). If and only if no
+   *   equal key is already present, this method converts 'key_in' to a key of
+   *   type KeyT using the provided LookupKeyToKeyFcn.
+   */
+  template <
+      typename LookupKeyT = key_type,
+      typename LookupHashFcn = hasher,
+      typename LookupEqualFcn = key_equal,
+      typename LookupKeyToKeyFcn = key_convert,
+      typename... ArgTs>
+  std::pair<iterator, bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
+    SimpleRetT ret = insertInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn, LookupKeyToKeyFcn>(
+        key_in,
+        std::forward<ArgTs>(vCtorArgs)...);
     return std::make_pair(iterator(this, ret.idx), ret.success);
   }
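A short sketch of the lookup-by-compatible-key feature documented above (the AHA alias and the SpHash/SpCharPtrEq functors are illustrative, not part of folly): keys are stored as interned const char* pointers but probed with a folly::StringPiece, so no key has to be materialized just to perform a read. Insertion would pair the same lookup types with a KeyConvertFcn that interns the StringPiece only when the key turns out to be new.

    #include <folly/AtomicHashArray.h>
    #include <folly/Hash.h>
    #include <folly/Range.h>

    struct SpHash { // LookupHashFcn: hashes the lookup type (and the stored key)
      size_t operator()(folly::StringPiece sp) const {
        return folly::hash::fnv64_buf(sp.data(), sp.size());
      }
    };

    struct SpCharPtrEq { // LookupEqualFcn: (stored KeyT, LookupKeyT)
      bool operator()(const char* stored, folly::StringPiece lookup) const {
        return folly::StringPiece(stored) == lookup;
      }
    };

    // Content-based hash/equality, so stored const char* keys and StringPiece
    // lookups agree on hashing and comparison.
    using AHA = folly::AtomicHashArray<const char*, int, SpHash, SpCharPtrEq>;

    int lookupByPiece(AHA& map, folly::StringPiece name) {
      auto it = map.find<folly::StringPiece, SpHash, SpCharPtrEq>(name);
      return it == map.end() ? -1 : it->second;
    }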
@@ -221,24 +326,43 @@ class AtomicHashArray : boost::noncopyable {
     numPendingEntries_.setCacheSize(newSize);
   }
 
-  int getEntryCountThreadCacheSize() const {
+  uint32_t getEntryCountThreadCacheSize() const {
     return numEntries_.getCacheSize();
   }
 
   /* Private data and helper functions... */
  private:
-  friend class AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>;
+  friend class AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>;
 
   struct SimpleRetT { size_t idx; bool success;
     SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
     SimpleRetT() = default;
   };
 
-  template <typename T>
-  SimpleRetT insertInternal(KeyT key, T&& value);
-
-  SimpleRetT findInternal(const KeyT key);
+  template <
+      typename LookupKeyT = key_type,
+      typename LookupHashFcn = hasher,
+      typename LookupEqualFcn = key_equal,
+      typename LookupKeyToKeyFcn = Identity,
+      typename... ArgTs>
+  SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs);
+
+  template <
+      typename LookupKeyT = key_type,
+      typename LookupHashFcn = hasher,
+      typename LookupEqualFcn = key_equal>
+  SimpleRetT findInternal(const LookupKeyT key);
+
+  template <typename MaybeKeyT>
+  void checkLegalKeyIfKey(MaybeKeyT key) {
+    detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_);
+  }
 
   static std::atomic<KeyT>* cellKeyPtr(const value_type& r) {
     // We need some illegal casting here in order to actually store
@@ -274,8 +398,13 @@ class AtomicHashArray : boost::noncopyable {
   // Force constructor/destructor private since create/destroy should be
   // used externally instead
-  AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
-                  KeyT erasedKey, double maxLoadFactor, size_t cacheSize);
+  AtomicHashArray(
+      size_t capacity,
+      KeyT emptyKey,
+      KeyT lockedKey,
+      KeyT erasedKey,
+      double maxLoadFactor,
+      uint32_t cacheSize);
 
   ~AtomicHashArray() = default;
 
@@ -289,22 +418,16 @@ class AtomicHashArray : boost::noncopyable {
                          std::memory_order_acq_rel);
   }
 
-  inline size_t keyToAnchorIdx(const KeyT k) const {
-    const size_t hashVal = HashFcn()(k);
+  template <class LookupKeyT = key_type, class LookupHashFcn = hasher>
+  inline size_t keyToAnchorIdx(const LookupKeyT k) const {
+    const size_t hashVal = LookupHashFcn()(k);
     const size_t probe = hashVal & kAnchorMask_;
     return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
   }
 
-  inline size_t probeNext(size_t idx, size_t numProbes) {
-    //idx += numProbes; // quadratic probing
-    idx += 1; // linear probing
-    // Avoid modulus because it's slow
-    return LIKELY(idx < capacity_) ? idx : (idx - capacity_);
-  }
+
 }; // AtomicHashArray
 
 } // namespace folly
 
 #include
-
-#endif // FOLLY_ATOMICHASHARRAY_H_
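The checkLegalKeyIfKey() helper above relies on the two detail overloads near the top of the header: overload resolution prefers the single-type template when the probed key already has the stored key type, so the DCHECKs fire in debug builds, and it falls back to the no-op overload for any other lookup type, where comparing against the sentinels would be meaningless. A hypothetical illustration, assuming a normal folly build environment:

    #include <cstdint>
    #include <folly/AtomicHashArray.h>
    #include <folly/Range.h>

    void legalKeyDemo() {
      int64_t emptyKey = -1, lockedKey = -2, erasedKey = -3;
      // Same type as the sentinels: the DCHECK overload is chosen and verifies
      // that 7 is not one of the reserved keys (debug builds only).
      folly::detail::checkLegalKeyIfKeyTImpl(int64_t(7), emptyKey, lockedKey, erasedKey);
      // Different lookup type: deduction for the single-type overload fails,
      // so the no-op overload is chosen and nothing is checked.
      folly::detail::checkLegalKeyIfKeyTImpl(
          folly::StringPiece("name"), emptyKey, lockedKey, erasedKey);
    }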