X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2FAtomicHashArray.h;h=96a647d1a3da6bcf0e0f907273a20da1c09f7a24;hp=71ef2c76ab5fe87ffba5b5679c7d64c5dc064963;hb=1f52e58c784503913dfb915d389cbd3ee1a3e005;hpb=99de4c5fde6fce17ac9dab77b2b99ceffce999dc

diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h
index 71ef2c76..96a647d1 100644
--- a/folly/AtomicHashArray.h
+++ b/folly/AtomicHashArray.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 
 /**
  * AtomicHashArray is the building block for AtomicHashMap.  It provides the
- * core lock-free functionality, but is limitted by the fact that it cannot
- * grow past it's initialization size and is a little more awkward (no public
+ * core lock-free functionality, but is limited by the fact that it cannot
+ * grow past its initialization size and is a little more awkward (no public
  * constructor, for example).  If you're confident that you won't run out of
  * space, don't mind the awkardness, and really need bare-metal performance,
  * feel free to use AHA directly.
@@ -29,7 +29,7 @@
  * @author Jordan DeLong
  */
 
-#ifndef FOLLY_ATOMICHASHARRAY_H_
+#pragma once
 #define FOLLY_ATOMICHASHARRAY_H_
 
 #include <atomic>
@@ -44,7 +44,9 @@ namespace folly {
 
 struct AtomicHashArrayLinearProbeFcn
 {
-  inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const{
+  inline size_t operator()(size_t idx,
+                           size_t /* numProbes */,
+                           size_t capacity) const {
     idx += 1; // linear probing
 
     // Avoid modulus because it's slow
@@ -62,18 +64,47 @@ struct AtomicHashArrayQuadraticProbeFcn
   }
 };
 
+// Enables specializing checkLegalKey without specializing its class.
+namespace detail {
+// Local copy of folly::gen::Identity, to avoid heavy dependencies.
+class AHAIdentity {
+ public:
+  template <class Value>
+  auto operator()(Value&& value) const ->
+    decltype(std::forward<Value>(value)) {
+    return std::forward<Value>(value);
+  }
+};
+
+template <typename NotKeyT, typename KeyT>
+inline void checkLegalKeyIfKeyTImpl(NotKeyT /* ignored */,
+                                    KeyT /* emptyKey */,
+                                    KeyT /* lockedKey */,
+                                    KeyT /* erasedKey */) {}
+
+template <typename KeyT>
+inline void checkLegalKeyIfKeyTImpl(KeyT key_in, KeyT emptyKey,
+                                    KeyT lockedKey, KeyT erasedKey) {
+  DCHECK_NE(key_in, emptyKey);
+  DCHECK_NE(key_in, lockedKey);
+  DCHECK_NE(key_in, erasedKey);
+}
+} // namespace detail
+
 template <class KeyT, class ValueT,
           class HashFcn = std::hash<KeyT>,
           class EqualFcn = std::equal_to<KeyT>,
           class Allocator = std::allocator<char>,
-          class ProbeFcn = AtomicHashArrayLinearProbeFcn>
+          class ProbeFcn = AtomicHashArrayLinearProbeFcn,
+          class KeyConvertFcn = detail::AHAIdentity>
 class AtomicHashMap;
 
 template <class KeyT, class ValueT,
           class HashFcn = std::hash<KeyT>,
           class EqualFcn = std::equal_to<KeyT>,
           class Allocator = std::allocator<char>,
-          class ProbeFcn = AtomicHashArrayLinearProbeFcn>
+          class ProbeFcn = AtomicHashArrayLinearProbeFcn,
+          class KeyConvertFcn = detail::AHAIdentity>
 class AtomicHashArray : boost::noncopyable {
   static_assert((std::is_convertible<KeyT,int32_t>::value ||
                  std::is_convertible<KeyT,int64_t>::value ||
@@ -84,6 +115,9 @@ class AtomicHashArray : boost::noncopyable {
  public:
   typedef KeyT                key_type;
   typedef ValueT              mapped_type;
+  typedef HashFcn             hasher;
+  typedef EqualFcn            key_equal;
+  typedef KeyConvertFcn       key_convert;
   typedef std::pair<const KeyT, ValueT> value_type;
   typedef std::size_t         size_type;
   typedef std::ptrdiff_t      difference_type;
@@ -142,12 +176,12 @@ class AtomicHashArray : boost::noncopyable {
    *   deleter to make sure everything is cleaned up properly.
    */
   struct Config {
-    KeyT   emptyKey;
-    KeyT   lockedKey;
-    KeyT   erasedKey;
+    KeyT emptyKey;
+    KeyT lockedKey;
+    KeyT erasedKey;
     double maxLoadFactor;
     double growthFactor;
-    int    entryCountThreadCacheSize;
+    uint32_t entryCountThreadCacheSize;
     size_t capacity; // if positive, overrides maxLoadFactor
 
    public:
@@ -164,11 +198,37 @@ class AtomicHashArray : boost::noncopyable {
   //  Cannot have pre-instantiated const Config instance because of SIOF.
   static SmartPtr create(size_t maxSize, const Config& c = Config());
 
-  iterator find(KeyT k) {
-    return iterator(this, findInternal(k).idx);
+  /*
+   * find --
+   *
+   *
+   *   Returns the iterator to the element if found, otherwise end().
+   *
+   *   As an optional feature, the type of the key to look up (LookupKeyT) is
+   *   allowed to be different from the type of keys actually stored (KeyT).
+   *
+   *   This enables use cases where materializing the key is costly and usually
+   *   redundant, e.g., canonicalizing/interning a set of strings and being able
+   *   to look up by StringPiece.  To use this feature, LookupHashFcn must take
+   *   a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
+   *   and second parameter, respectively.
+   *
+   *   See folly/test/AtomicHashArrayTest.cpp for sample usage.
+   */
+  template <typename LookupKeyT = key_type,
+            typename LookupHashFcn = hasher,
+            typename LookupEqualFcn = key_equal>
+  iterator find(LookupKeyT k) {
+    return iterator(this,
+        findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
   }
-  const_iterator find(KeyT k) const {
-    return const_cast<AtomicHashArray*>(this)->find(k);
+
+  template <typename LookupKeyT = key_type,
+            typename LookupHashFcn = hasher,
+            typename LookupEqualFcn = key_equal>
+  const_iterator find(LookupKeyT k) const {
+    return const_cast<AtomicHashArray*>(this)->
+      find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
   }
 
   /*
@@ -194,10 +254,24 @@ class AtomicHashArray : boost::noncopyable {
    *
    *   Same contract as insert(), but performs in-place construction
    *   of the value type using the specified arguments.
+   *
+   *   Also, like find(), this method optionally allows 'key_in' to have a type
+   *   different from that stored in the table; see find(). If and only if no
+   *   equal key is already present, this method converts 'key_in' to a key of
+   *   type KeyT using the provided LookupKeyToKeyFcn.
    */
-  template <typename... ArgTs>
-  std::pair<iterator,bool> emplace(KeyT key_in, ArgTs&&... vCtorArgs) {
-    SimpleRetT ret = insertInternal(key_in, std::forward<ArgTs>(vCtorArgs)...);
+  template <typename LookupKeyT = key_type,
+            typename LookupHashFcn = hasher,
+            typename LookupEqualFcn = key_equal,
+            typename LookupKeyToKeyFcn = key_convert,
+            typename... ArgTs>
+  std::pair<iterator,bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
+    SimpleRetT ret = insertInternal<LookupKeyT,
+                                    LookupHashFcn,
+                                    LookupEqualFcn,
+                                    LookupKeyToKeyFcn>(
+        key_in,
+        std::forward<ArgTs>(vCtorArgs)...);
     return std::make_pair(iterator(this, ret.idx), ret.success);
   }
 
@@ -255,7 +329,7 @@ class AtomicHashArray : boost::noncopyable {
     numPendingEntries_.setCacheSize(newSize);
   }
 
-  int getEntryCountThreadCacheSize() const {
+  uint32_t getEntryCountThreadCacheSize() const {
     return numEntries_.getCacheSize();
   }
 
@@ -276,10 +350,22 @@ friend class AtomicHashMap
-  template <typename... ArgTs>
-  SimpleRetT insertInternal(KeyT key, ArgTs&&... vCtorArgs);
+  template <typename LookupKeyT = key_type,
+            typename LookupHashFcn = hasher,
+            typename LookupEqualFcn = key_equal,
+            typename LookupKeyToKeyFcn = key_convert,
+            typename... ArgTs>
+  SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs);
+
+  template <typename LookupKeyT = key_type,
+            typename LookupHashFcn = hasher,
+            typename LookupEqualFcn = key_equal>
+  SimpleRetT findInternal(const LookupKeyT key);
 
-  SimpleRetT findInternal(const KeyT key);
+  template <typename MaybeKeyT>
+  void checkLegalKeyIfKey(MaybeKeyT key) {
+    detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_);
+  }
 
   static std::atomic<KeyT>* cellKeyPtr(const value_type& r) {
     // We need some illegal casting here in order to actually store
@@ -315,8 +401,13 @@ friend class AtomicHashMap
-  inline size_t keyToAnchorIdx(const KeyT k) const {
-    const size_t hashVal = HashFcn()(k);
+  template <class LookupKeyT = key_type,
+            class LookupHashFcn = hasher>
+  inline size_t keyToAnchorIdx(const LookupKeyT k) const {
+    const size_t hashVal = LookupHashFcn()(k);
     const size_t probe = hashVal & kAnchorMask_;
     return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
   }
 
@@ -342,5 +434,3 @@ friend class AtomicHashMap
 } // namespace folly
 
 #include <folly/AtomicHashArray-inl.h>
-
-#endif // FOLLY_ATOMICHASHARRAY_H_
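
A minimal usage sketch of the heterogeneous lookup added above, for a table keyed by interned const char* strings and probed by folly::StringPiece. The functors SpHash, SpEqual, and SpIntern and the internString() helper are hypothetical illustrations written against the find()/emplace() signatures shown in the diff, not folly APIs; construction of the table itself (AtomicHashArray::create with a suitable Config) is omitted.

#include <set>
#include <string>

#include <folly/AtomicHashArray.h>
#include <folly/Hash.h>
#include <folly/Range.h>

// Hypothetical lookup functors: LookupHashFcn hashes the lookup key, and
// LookupEqualFcn takes the stored KeyT first and the LookupKeyT second,
// as documented on find() above.
struct SpHash {
  size_t operator()(folly::StringPiece sp) const {
    return folly::hash::fnv64_buf(sp.data(), sp.size());
  }
};

struct SpEqual {
  bool operator()(const char* stored, folly::StringPiece probe) const {
    return stored != nullptr && folly::StringPiece(stored) == probe;
  }
};

// Hypothetical interner; a production version would need to be thread-safe
// and keep the strings alive for the lifetime of the table.
const char* internString(folly::StringPiece sp) {
  static std::set<std::string> pool;
  return pool.insert(sp.str()).first->c_str();
}

// LookupKeyToKeyFcn: converts the lookup key to a stored key; emplace()
// invokes it only when no equal key is already present.
struct SpIntern {
  const char* operator()(folly::StringPiece sp) const {
    return internString(sp);
  }
};

using AHA = folly::AtomicHashArray<const char*, int64_t>;

void findOrInsert(AHA& aha, folly::StringPiece name) {
  // Probe by StringPiece without materializing an interned const char*.
  auto it = aha.find<folly::StringPiece, SpHash, SpEqual>(name);
  if (it == aha.end()) {
    // The key is interned (converted to KeyT) only on actual insertion.
    aha.emplace<folly::StringPiece, SpHash, SpEqual, SpIntern>(name, int64_t{0});
  }
}

The point of LookupKeyToKeyFcn is that the potentially expensive conversion to the stored key type runs only on the insert path, never on lookups that hit an existing entry.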