X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fllvm%2FADT%2FHashing.h;h=de56f91eddb154cc2c69c5dbae2b795f370aaaaa;hb=5e7bb43066143d35d57dee88a113d56bdc9375c8;hp=5efaa72705e3311c88c39d00921020dbd354571b;hpb=344224b3a34bda62bea86c06807584ec7558e157;p=oota-llvm.git

diff --git a/include/llvm/ADT/Hashing.h b/include/llvm/ADT/Hashing.h
index 5efaa72705e..de56f91eddb 100644
--- a/include/llvm/ADT/Hashing.h
+++ b/include/llvm/ADT/Hashing.h
@@ -45,7 +45,6 @@
 #ifndef LLVM_ADT_HASHING_H
 #define LLVM_ADT_HASHING_H
 
-#include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/Host.h"
 #include "llvm/Support/SwapByteOrder.h"
@@ -54,14 +53,9 @@
 #include
 #include
 #include
+#include
 #include
 
-// Allow detecting C++11 feature availability when building with Clang without
-// breaking other compilers.
-#ifndef __has_feature
-# define __has_feature(x) 0
-#endif
-
 namespace llvm {
 
 /// \brief An opaque object representing a hash code.
@@ -76,17 +70,13 @@ namespace llvm {
 ///   using llvm::hash_value;
 ///   llvm::hash_code code = hash_value(x);
 /// \endcode
-///
-/// Also note that there are two numerical values which are reserved, and the
-/// implementation ensures will never be produced for real hash_codes. These
-/// can be used as sentinels within hashing data structures.
 class hash_code {
   size_t value;
 
 public:
   /// \brief Default construct a hash_code.
   /// Note that this leaves the value uninitialized.
-  hash_code() {}
+  hash_code() = default;
 
   /// \brief Form a hash code directly from a numerical value.
   hash_code(size_t value) : value(value) {}
@@ -113,7 +103,8 @@ public:
 /// differing argument types even if they would implicit promote to a common
 /// type without changing the value.
 template <typename T>
-typename enable_if<is_integral<T>, hash_code>::type hash_value(T value);
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value);
 
 /// \brief Compute a hash_code for a pointer's address.
 ///
@@ -155,16 +146,16 @@ namespace detail {
 inline uint64_t fetch64(const char *p) {
   uint64_t result;
   memcpy(&result, p, sizeof(result));
-  if (sys::isBigEndianHost())
-    return sys::SwapByteOrder(result);
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
   return result;
 }
 
 inline uint32_t fetch32(const char *p) {
   uint32_t result;
   memcpy(&result, p, sizeof(result));
-  if (sys::isBigEndianHost())
-    return sys::SwapByteOrder(result);
+  if (sys::IsBigEndianHost)
+    sys::swapByteOrder(result);
   return result;
 }
 
@@ -269,7 +260,6 @@ inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
 /// keeps 56 bytes of arbitrary state.
 struct hash_state {
   uint64_t h0, h1, h2, h3, h4, h5, h6;
-  uint64_t seed;
 
   /// \brief Create a new hash_state structure and initialize it based on the
   /// seed and the first 64-byte chunk.
@@ -277,7 +267,7 @@ struct hash_state {
   static hash_state create(const char *s, uint64_t seed) {
     hash_state state = {
       0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
-      seed * k1, shift_mix(seed), 0, seed };
+      seed * k1, shift_mix(seed), 0 };
     state.h6 = hash_16_bytes(state.h4, state.h5);
     state.mix(s);
     return state;
@@ -349,30 +339,31 @@ inline size_t get_execution_seed() {
 /// reading the underlying data. It is false if values of this type must
 /// first be passed to hash_value, and the resulting hash_codes combined.
 //
-// FIXME: We want to replace is_integral and is_pointer here with a predicate
-// which asserts that comparing the underlying storage of two values of the
-// type for equality is equivalent to comparing the two values for equality.
-// For all the platforms we care about, this holds for integers and pointers,
-// but there are platforms where it doesn't and we would like to support
-// user-defined types which happen to satisfy this property.
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
 template <typename T> struct is_hashable_data
-  : integral_constant<bool, ((is_integral<T>::value ||
-                              is_pointer<T>::value) &&
-                             64 % sizeof(T) == 0)> {};
+  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
+                                   std::is_pointer<T>::value) &&
+                                  64 % sizeof(T) == 0)> {};
 
 // Special case std::pair to detect when both types are viable and when there
 // is no alignment-derived padding in the pair. This is a bit of a lie because
 // std::pair isn't truly POD, but it's close enough in all reasonable
 // implementations for our use case of hashing the underlying data.
 template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
-  : integral_constant<bool, (is_hashable_data<T>::value &&
-                             is_hashable_data<U>::value &&
-                             (sizeof(T) + sizeof(U)) ==
-                              sizeof(std::pair<T, U>))> {};
+  : std::integral_constant<bool, (is_hashable_data<T>::value &&
+                                  is_hashable_data<U>::value &&
+                                  (sizeof(T) + sizeof(U)) ==
+                                   sizeof(std::pair<T, U>))> {};
 
 /// \brief Helper to get the hashable data representation for a type.
 /// This variant is enabled when the type itself can be used.
 template <typename T>
-typename enable_if<is_hashable_data<T>, T>::type
+typename std::enable_if<is_hashable_data<T>::value, T>::type
 get_hashable_data(const T &value) {
   return value;
 }
@@ -380,7 +371,7 @@ get_hashable_data(const T &value) {
 /// This variant is enabled when we must first call hash_value and use the
 /// result as our data.
 template <typename T>
-typename enable_if_c<!is_hashable_data<T>::value, size_t>::type
+typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
 get_hashable_data(const T &value) {
   using ::llvm::hash_value;
   return hash_value(value);
 }
@@ -412,10 +403,9 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
 /// combining them, this (as an optimization) directly combines the integers.
 template <typename InputIteratorT>
 hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
-  typedef typename std::iterator_traits<InputIteratorT>::value_type ValueT;
   const size_t seed = get_execution_seed();
   char buffer[64], *buffer_ptr = buffer;
-  char *const buffer_end = buffer_ptr + array_lengthof(buffer);
+  char *const buffer_end = std::end(buffer);
   while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                             get_hashable_data(*first)))
     ++first;
@@ -455,8 +445,8 @@ hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
 /// are stored in contiguous memory, this routine avoids copying each value
 /// and directly reads from the underlying memory.
 template <typename ValueT>
-typename enable_if<is_hashable_data<ValueT>, hash_code>::type
-hash_combine_range_impl(const ValueT *first, const ValueT *last) {
+typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
+hash_combine_range_impl(ValueT *first, ValueT *last) {
   const size_t seed = get_execution_seed();
   const char *s_begin = reinterpret_cast<const char *>(first);
   const char *s_end = reinterpret_cast<const char *>(last);
@@ -504,13 +494,10 @@ namespace detail {
 /// recursive combining of arguments used in hash_combine. It is particularly
 /// useful at minimizing the code in the recursive calls to ease the pain
 /// caused by a lack of variadic functions.
-class hash_combine_recursive_helper {
-  const size_t seed;
+struct hash_combine_recursive_helper {
   char buffer[64];
-  char *const buffer_end;
-  char *buffer_ptr;
-  size_t length;
   hash_state state;
+  const size_t seed;
 
 public:
   /// \brief Construct a recursive hash combining helper.
@@ -518,10 +505,7 @@ public:
   /// This sets up the state for a recursive hash combine, including getting
   /// the seed and buffer setup.
   hash_combine_recursive_helper()
-    : seed(get_execution_seed()),
-      buffer_end(buffer + array_lengthof(buffer)),
-      buffer_ptr(buffer),
-      length(0) {}
+    : seed(get_execution_seed()) {}
 
   /// \brief Combine one chunk of data into the current in-flight hash.
   ///
@@ -529,7 +513,8 @@ public:
   /// the data. If the buffer is full, it hashes the buffer into its
   /// hash_state, empties it, and then merges the new chunk in. This also
   /// handles cases where the data straddles the end of the buffer.
-  template <typename T> void combine_data(T data) {
+  template <typename T>
+  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
     if (!store_and_advance(buffer_ptr, buffer_end, data)) {
       // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
@@ -560,69 +545,28 @@ public:
                              partial_store_size))
        abort();
     }
+    return buffer_ptr;
   }
 
-#if defined(__has_feature) && __has_feature(__cxx_variadic_templates__)
-
   /// \brief Recursive, variadic combining method.
   ///
   /// This function recurses through each argument, combining that argument
   /// into a single hash.
   template <typename T, typename ...Ts>
-  hash_code combine(const T &arg, const Ts &...args) {
-    combine_data( get_hashable_data(arg));
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+                    const T &arg, const Ts &...args) {
+    buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
 
     // Recurse to the next argument.
-    return combine(args...);
-  }
-
-#else
-  // Manually expanded recursive combining methods. See variadic above for
-  // documentation.
-
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-            typename T6>
-  hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
-                    const T4 &arg4, const T5 &arg5, const T6 &arg6) {
-    combine_data(get_hashable_data(arg1));
-    return combine(arg2, arg3, arg4, arg5, arg6);
-  }
-  template <typename T1, typename T2, typename T3, typename T4, typename T5>
-  hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
-                    const T4 &arg4, const T5 &arg5) {
-    combine_data(get_hashable_data(arg1));
-    return combine(arg2, arg3, arg4, arg5);
-  }
-  template <typename T1, typename T2, typename T3, typename T4>
-  hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
-                    const T4 &arg4) {
-    combine_data(get_hashable_data(arg1));
-    return combine(arg2, arg3, arg4);
-  }
-  template <typename T1, typename T2, typename T3>
-  hash_code combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) {
-    combine_data(get_hashable_data(arg1));
-    return combine(arg2, arg3);
-  }
-  template <typename T1, typename T2>
-  hash_code combine(const T1 &arg1, const T2 &arg2) {
-    combine_data(get_hashable_data(arg1));
-    return combine(arg2);
-  }
-  template <typename T1>
-  hash_code combine(const T1 &arg1) {
-    combine_data(get_hashable_data(arg1));
-    return combine();
+    return combine(length, buffer_ptr, buffer_end, args...);
   }
 
-#endif
-
   /// \brief Base case for recursive, variadic combining.
   ///
   /// The base case when combining arguments recursively is reached when all
   /// arguments have been handled. It flushes the remaining buffer and
   /// constructs a hash_code.
-  hash_code combine() {
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
     // Check whether the entire set of values fit in the buffer. If so, we'll
     // use the optimized short hashing routine and skip state entirely.
     if (length == 0)
@@ -645,9 +589,6 @@ public:
 } // namespace detail
 } // namespace hashing
 
-
-#if __has_feature(__cxx_variadic_templates__)
-
 /// \brief Combine values into a single hash_code.
 ///
 /// This routine accepts a varying number of arguments of any type. It will
@@ -662,53 +603,10 @@ public:
 template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
   // Recursively hash each argument using a helper class.
   ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(args...);
+  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
 }
 
-#else
-
-// What follows are manually exploded overloads for each argument width. See
-// the above variadic definition for documentation and specification.
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
-          typename T6>
-hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
-                       const T4 &arg4, const T5 &arg5, const T6 &arg6) {
-  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(arg1, arg2, arg3, arg4, arg5, arg6);
-}
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
-                       const T4 &arg4, const T5 &arg5) {
-  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(arg1, arg2, arg3, arg4, arg5);
-}
-template <typename T1, typename T2, typename T3, typename T4>
-hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
-                       const T4 &arg4) {
-  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(arg1, arg2, arg3, arg4);
-}
-template <typename T1, typename T2, typename T3>
-hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) {
-  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(arg1, arg2, arg3);
-}
-template <typename T1, typename T2>
-hash_code hash_combine(const T1 &arg1, const T2 &arg2) {
-  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(arg1, arg2);
-}
-template <typename T1>
-hash_code hash_combine(const T1 &arg1) {
-  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
-  return helper.combine(arg1);
-}
-
-#endif
-
-
-// Implementation details for implementatinos of hash_value overloads provided
+// Implementation details for implementations of hash_value overloads provided
 // here.
 namespace hashing {
 namespace detail {
@@ -732,7 +630,8 @@ inline hash_code hash_integer_value(uint64_t value) {
 // Declared and documented above, but defined here so that any of the hashing
 // infrastructure is available.
 template <typename T>
-typename enable_if<is_integral<T>, hash_code>::type hash_value(T value) {
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value) {
   return ::llvm::hashing::detail::hash_integer_value(value);
 }
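
For reference, below is a minimal usage sketch of the API this header provides after the change: hash_value for single values, hash_combine for mixing several arguments, and hash_combine_range for a contiguous sequence. It is not part of the diff; the Point type and its hash_value overload are hypothetical illustrations of the customization pattern (an ADL-visible hash_value overload, as used by get_hashable_data above), and exact overload availability depends on the LLVM revision being built against.

#include "llvm/ADT/Hashing.h"
#include <cstddef>
#include <cstdio>

// Hypothetical user-defined type for illustration only.
struct Point {
  int x, y;
};

// User-defined types opt in by providing a hash_value overload that
// argument-dependent lookup can find; hash_combine mixes the members.
llvm::hash_code hash_value(const Point &p) {
  return llvm::hash_combine(p.x, p.y);
}

int main() {
  // Integral values go through the std::enable_if-constrained overload
  // declared in this header.
  llvm::hash_code h1 = llvm::hash_value(42);

  // hash_combine accepts a varying number of arguments of hashable types,
  // including user-defined ones that provide hash_value.
  Point p = {1, 2};
  llvm::hash_code h2 = llvm::hash_combine(p, 7u, -3);

  // hash_combine_range hashes a contiguous range; for directly hashable
  // element types it reads the underlying bytes without copying them.
  int values[] = {1, 2, 3, 4};
  llvm::hash_code h3 = llvm::hash_combine_range(values, values + 4);

  // hash_code converts implicitly to size_t, so it can be compared, stored,
  // or printed directly.
  std::printf("%zu %zu %zu\n", static_cast<std::size_t>(h1),
              static_cast<std::size_t>(h2), static_cast<std::size_t>(h3));
  return 0;
}

Note that the seed comes from get_execution_seed() above, so the printed values are only stable within a single run of the program, not across processes or LLVM versions.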