/*
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
/**
 * AtomicHashArray is the building block for AtomicHashMap. It provides the
 * core lock-free functionality, but is limited by the fact that it cannot
 * grow past its initialization size and is a little more awkward (no public
 * constructor, for example). If you're confident that you won't run out of
 * space, don't mind the awkwardness, and really need bare-metal performance,
 * feel free to use AHA directly.
 * @author Jordan DeLong <delong.j@fb.com>
 */
#pragma once

#include <atomic>

#include <boost/iterator/iterator_facade.hpp>
#include <boost/noncopyable.hpp>

#include <folly/ThreadCachedInt.h>
#include <folly/Utility.h>
#include <folly/hash/Hash.h>
namespace folly {
-template <class KeyT, class ValueT, class HashFcn = std::hash<KeyT>>
+struct AtomicHashArrayLinearProbeFcn
+{
+ inline size_t operator()(size_t idx,
+ size_t /* numProbes */,
+ size_t capacity) const {
+ idx += 1; // linear probing
+
+ // Avoid modulus because it's slow
+ return LIKELY(idx < capacity) ? idx : (idx - capacity);
+ }
+};
+
+struct AtomicHashArrayQuadraticProbeFcn
+{
+ inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const{
+ idx += numProbes; // quadratic probing
+
+ // Avoid modulus because it's slow
+ return LIKELY(idx < capacity) ? idx : (idx - capacity);
+ }
+};
+
// Enables specializing checkLegalKey without specializing its class.
namespace detail {

// Overload chosen when the probed key type differs from KeyT: nothing can be
// checked, so this is a no-op.
template <typename NotKeyT, typename KeyT>
inline void checkLegalKeyIfKeyTImpl(
    NotKeyT /* ignored */,
    KeyT /* emptyKey */,
    KeyT /* lockedKey */,
    KeyT /* erasedKey */) {}

// Overload chosen when the key type is exactly KeyT: assert (debug builds
// only) that the key collides with none of the three reserved sentinels.
template <typename KeyT>
inline void checkLegalKeyIfKeyTImpl(
    KeyT key_in, KeyT emptyKey, KeyT lockedKey, KeyT erasedKey) {
  DCHECK_NE(key_in, emptyKey);
  DCHECK_NE(key_in, lockedKey);
  DCHECK_NE(key_in, erasedKey);
}

} // namespace detail

+template <
+ class KeyT,
+ class ValueT,
+ class HashFcn = std::hash<KeyT>,
+ class EqualFcn = std::equal_to<KeyT>,
+ class Allocator = std::allocator<char>,
+ class ProbeFcn = AtomicHashArrayLinearProbeFcn,
+ class KeyConvertFcn = Identity>
class AtomicHashMap;
-template <class KeyT, class ValueT, class HashFcn = std::hash<KeyT>>
+template <
+ class KeyT,
+ class ValueT,
+ class HashFcn = std::hash<KeyT>,
+ class EqualFcn = std::equal_to<KeyT>,
+ class Allocator = std::allocator<char>,
+ class ProbeFcn = AtomicHashArrayLinearProbeFcn,
+ class KeyConvertFcn = Identity>
class AtomicHashArray : boost::noncopyable {
static_assert((std::is_convertible<KeyT,int32_t>::value ||
- std::is_convertible<KeyT,int64_t>::value),
+ std::is_convertible<KeyT,int64_t>::value ||
+ std::is_convertible<KeyT,const void*>::value),
"You are trying to use AtomicHashArray with disallowed key "
"types. You must use atomically compare-and-swappable integer "
"keys, or a different container class.");
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
+ typedef HashFcn hasher;
+ typedef EqualFcn key_equal;
+ typedef KeyConvertFcn key_convert;
typedef std::pair<const KeyT, ValueT> value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
const KeyT kLockedKey_;
const KeyT kErasedKey_;
- template<class ContT, class IterVal>
+ template <class ContT, class IterVal>
struct aha_iterator;
typedef aha_iterator<const AtomicHashArray,const value_type> const_iterator;
* deleter to make sure everything is cleaned up properly.
*/
struct Config {
- KeyT emptyKey;
- KeyT lockedKey;
- KeyT erasedKey;
+ KeyT emptyKey;
+ KeyT lockedKey;
+ KeyT erasedKey;
double maxLoadFactor;
- int entryCountThreadCacheSize;
+ double growthFactor;
+ uint32_t entryCountThreadCacheSize;
size_t capacity; // if positive, overrides maxLoadFactor
- constexpr Config() : emptyKey(static_cast<KeyT>(-1ul)),
- lockedKey(static_cast<KeyT>(-2ul)),
- erasedKey(static_cast<KeyT>(-3ul)),
- maxLoadFactor(0.8),
- entryCountThreadCacheSize(1000),
- capacity(0) {}
+ // Cannot have constexpr ctor because some compilers rightly complain.
+ Config() : emptyKey((KeyT)-1),
+ lockedKey((KeyT)-2),
+ erasedKey((KeyT)-3),
+ maxLoadFactor(0.8),
+ growthFactor(-1),
+ entryCountThreadCacheSize(1000),
+ capacity(0) {}
};
- static const Config defaultConfig;
- static SmartPtr create(size_t maxSize, const Config& = defaultConfig);
+ // Cannot have pre-instantiated const Config instance because of SIOF.
+ static SmartPtr create(size_t maxSize, const Config& c = Config());
- iterator find(KeyT k) {
- return iterator(this, findInternal(k).idx);
+ /*
+ * find --
+ *
+ *
+ * Returns the iterator to the element if found, otherwise end().
+ *
+ * As an optional feature, the type of the key to look up (LookupKeyT) is
+ * allowed to be different from the type of keys actually stored (KeyT).
+ *
+ * This enables use cases where materializing the key is costly and usually
+ * redudant, e.g., canonicalizing/interning a set of strings and being able
+ * to look up by StringPiece. To use this feature, LookupHashFcn must take
+ * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first
+ * and second parameter, respectively.
+ *
+ * See folly/test/ArrayHashArrayTest.cpp for sample usage.
+ */
+ template <
+ typename LookupKeyT = key_type,
+ typename LookupHashFcn = hasher,
+ typename LookupEqualFcn = key_equal>
+ iterator find(LookupKeyT k) {
+ return iterator(this,
+ findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k).idx);
}
- const_iterator find(KeyT k) const {
- return const_cast<AtomicHashArray*>(this)->find(k);
+
+ template <
+ typename LookupKeyT = key_type,
+ typename LookupHashFcn = hasher,
+ typename LookupEqualFcn = key_equal>
+ const_iterator find(LookupKeyT k) const {
+ return const_cast<AtomicHashArray*>(this)->
+ find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
}
/*
* iterator is set to the existing entry.
*/
std::pair<iterator,bool> insert(const value_type& r) {
- SimpleRetT ret = insertInternal(r.first, r.second);
- return std::make_pair(iterator(this, ret.idx), ret.success);
+ return emplace(r.first, r.second);
}
std::pair<iterator,bool> insert(value_type&& r) {
- SimpleRetT ret = insertInternal(r.first, std::move(r.second));
+ return emplace(r.first, std::move(r.second));
+ }
+
+ /*
+ * emplace --
+ *
+ * Same contract as insert(), but performs in-place construction
+ * of the value type using the specified arguments.
+ *
+ * Also, like find(), this method optionally allows 'key_in' to have a type
+ * different from that stored in the table; see find(). If and only if no
+ * equal key is already present, this method converts 'key_in' to a key of
+ * type KeyT using the provided LookupKeyToKeyFcn.
+ */
+ template <
+ typename LookupKeyT = key_type,
+ typename LookupHashFcn = hasher,
+ typename LookupEqualFcn = key_equal,
+ typename LookupKeyToKeyFcn = key_convert,
+ typename... ArgTs>
+ std::pair<iterator,bool> emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) {
+ SimpleRetT ret = insertInternal<LookupKeyT,
+ LookupHashFcn,
+ LookupEqualFcn,
+ LookupKeyToKeyFcn>(
+ key_in,
+ std::forward<ArgTs>(vCtorArgs)...);
return std::make_pair(iterator(this, ret.idx), ret.success);
}
bool empty() const { return size() == 0; }
- iterator begin() { return iterator(this, 0); }
+ iterator begin() {
+ iterator it(this, 0);
+ it.advancePastEmpty();
+ return it;
+ }
+ const_iterator begin() const {
+ const_iterator it(this, 0);
+ it.advancePastEmpty();
+ return it;
+ }
+
iterator end() { return iterator(this, capacity_); }
- const_iterator begin() const { return const_iterator(this, 0); }
const_iterator end() const { return const_iterator(this, capacity_); }
// See AtomicHashMap::findAt - access elements directly
numPendingEntries_.setCacheSize(newSize);
}
- int getEntryCountThreadCacheSize() const {
+ uint32_t getEntryCountThreadCacheSize() const {
return numEntries_.getCacheSize();
}
/* Private data and helper functions... */
private:
- friend class AtomicHashMap<KeyT,ValueT,HashFcn>;
+friend class AtomicHashMap<KeyT,
+ ValueT,
+ HashFcn,
+ EqualFcn,
+ Allocator,
+ ProbeFcn>;
struct SimpleRetT { size_t idx; bool success;
SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
- SimpleRetT() {}
+ SimpleRetT() = default;
};
- template <class T>
- SimpleRetT insertInternal(KeyT key, T&& value);
-
- SimpleRetT findInternal(const KeyT key);
+ template <
+ typename LookupKeyT = key_type,
+ typename LookupHashFcn = hasher,
+ typename LookupEqualFcn = key_equal,
+ typename LookupKeyToKeyFcn = Identity,
+ typename... ArgTs>
+ SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs);
+
+ template <
+ typename LookupKeyT = key_type,
+ typename LookupHashFcn = hasher,
+ typename LookupEqualFcn = key_equal>
+ SimpleRetT findInternal(const LookupKeyT key);
+
+ template <typename MaybeKeyT>
+ void checkLegalKeyIfKey(MaybeKeyT key) {
+ detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_);
+ }
static std::atomic<KeyT>* cellKeyPtr(const value_type& r) {
// We need some illegal casting here in order to actually store
// reading the value, so be careful of calling size() too frequently. This
// increases insertion throughput several times over while keeping the count
// accurate.
- ThreadCachedInt<int64_t> numEntries_; // Successful key inserts
- ThreadCachedInt<int64_t> numPendingEntries_; // Used by insertInternal
+ ThreadCachedInt<uint64_t> numEntries_; // Successful key inserts
+ ThreadCachedInt<uint64_t> numPendingEntries_; // Used by insertInternal
std::atomic<int64_t> isFull_; // Used by insertInternal
std::atomic<int64_t> numErases_; // Successful key erases
// Force constructor/destructor private since create/destroy should be
// used externally instead
- AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
- KeyT erasedKey, double maxLoadFactor, size_t cacheSize);
+ AtomicHashArray(
+ size_t capacity,
+ KeyT emptyKey,
+ KeyT lockedKey,
+ KeyT erasedKey,
+ double maxLoadFactor,
+ uint32_t cacheSize);
- ~AtomicHashArray() {}
+ ~AtomicHashArray() = default;
inline void unlockCell(value_type* const cell, KeyT newKey) {
cellKeyPtr(*cell)->store(newKey, std::memory_order_release);
std::memory_order_acq_rel);
}
- inline size_t keyToAnchorIdx(const KeyT k) const {
- const size_t hashVal = HashFcn()(k);
+ template <class LookupKeyT = key_type, class LookupHashFcn = hasher>
+ inline size_t keyToAnchorIdx(const LookupKeyT k) const {
+ const size_t hashVal = LookupHashFcn()(k);
const size_t probe = hashVal & kAnchorMask_;
return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
}
- inline size_t probeNext(size_t idx, size_t numProbes) {
- //idx += numProbes; // quadratic probing
- idx += 1; // linear probing
- // Avoid modulus because it's slow
- return LIKELY(idx < capacity_) ? idx : (idx - capacity_);
- }
+
}; // AtomicHashArray
} // namespace folly
-#include "AtomicHashArray-inl.h"
-
-#endif // FOLLY_ATOMICHASHARRAY_H_
+#include <folly/AtomicHashArray-inl.h>