X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=folly%2FBits.h;h=e4b1b5ea0c38e4d660c2e1af443d07942174da9f;hb=a393e1cc0e1d29c8b4c7daef6ff4c1d9bf11f78a;hp=4d4656d20e784938b5d87278a21648506ed1151b;hpb=7fd87e7e86bb78dc693da2111bf915761346d540;p=folly.git

diff --git a/folly/Bits.h b/folly/Bits.h
index 4d4656d2..e4b1b5ea 100644
--- a/folly/Bits.h
+++ b/folly/Bits.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,21 +17,24 @@
 /**
  * Various low-level, bit-manipulation routines.
  *
- * findFirstSet(x)
+ * findFirstSet(x)  [constexpr]
  *    find first (least significant) bit set in a value of an integral type,
  *    1-based (like ffs()).  0 = no bits are set (x == 0)
  *
- * findLastSet(x)
+ * findLastSet(x)  [constexpr]
  *    find last (most significant) bit set in a value of an integral type,
  *    1-based.  0 = no bits are set (x == 0)
  *    for x != 0, findLastSet(x) == 1 + floor(log2(x))
  *
+ * nextPowTwo(x)  [constexpr]
+ *    Finds the next power of two >= x.
+ *
+ * isPowTwo(x)  [constexpr]
+ *    return true iff x is a power of two
+ *
 * popcount(x)
 *    return the number of 1 bits in x
 *
- * nextPowTwo(x)
- *    Finds the next power of two >= x.
- *
 * Endian
 *    convert between native, big, and little endian representation
 *    Endian::big(x)      big <-> native
@@ -49,28 +52,30 @@
  * @author Tudor Bosman (tudorb@fb.com)
  */

-#ifndef FOLLY_BITS_H_
-#define FOLLY_BITS_H_
-
-#include "folly/Portability.h"
+#pragma once

-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE 1
+#if !defined(__clang__) && !(defined(_MSC_VER) && (_MSC_VER < 1900))
+#define FOLLY_INTRINSIC_CONSTEXPR constexpr
+#else
+// GCC and MSVC 2015+ are the only compilers with
+// intrinsics constexpr.
+#define FOLLY_INTRINSIC_CONSTEXPR const
 #endif

-#ifndef __GNUC__
-#error GCC required
-#endif
+#include
+#include

-#include "folly/detail/BitsDetail.h"
-#include "folly/detail/BitIteratorDetail.h"
-#include "folly/Likely.h"
+#include
+#include
+#include
+#include
+
+#if FOLLY_HAVE_BYTESWAP_H
+# include <byteswap.h>
+#endif

-#include <byteswap.h>
 #include
 #include
-#include   // for ffs, ffsl, ffsll
-#include
 #include
 #include
 #include
@@ -80,99 +85,96 @@ namespace folly {

 // Generate overloads for findFirstSet as wrappers around
-// appropriate ffs, ffsl, ffsll functions from glibc.
-// We first define these overloads for signed types (because ffs, ffsl, ffsll
-// take int, long, and long long as arguments, respectively) and then
-// define an overload for unsigned that forwards to the overload for the
-// corresponding signed type.
+// appropriate ffs, ffsl, ffsll gcc builtins
 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
-   std::is_signed<T>::value &&
-   (std::numeric_limits<T>::digits <= std::numeric_limits<int>::digits)),
+   std::is_unsigned<T>::value &&
+   sizeof(T) <= sizeof(unsigned int)),
   unsigned int>::type
   findFirstSet(T x) {
-  return ::ffs(static_cast<int>(x));
+  return __builtin_ffs(x);
 }

 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
-   std::is_signed<T>::value &&
-   (std::numeric_limits<T>::digits > std::numeric_limits<int>::digits) &&
-   (std::numeric_limits<T>::digits <= std::numeric_limits<long>::digits)),
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned int) &&
+   sizeof(T) <= sizeof(unsigned long)),
   unsigned int>::type
   findFirstSet(T x) {
-  return ::ffsl(static_cast<long>(x));
+  return __builtin_ffsl(x);
 }

-#ifdef FOLLY_HAVE_FFSLL
-
 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
-   std::is_signed<T>::value &&
-   (std::numeric_limits<T>::digits > std::numeric_limits<long>::digits) &&
-   (std::numeric_limits<T>::digits <= std::numeric_limits<long long>::digits)),
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned long) &&
+   sizeof(T) <= sizeof(unsigned long long)),
   unsigned int>::type
   findFirstSet(T x) {
-  return ::ffsll(static_cast<long long>(x));
+  return __builtin_ffsll(x);
 }

-#endif
-
 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
-  (std::is_integral<T>::value &&
-   !std::is_signed<T>::value),
+  (std::is_integral<T>::value && std::is_signed<T>::value),
   unsigned int>::type
   findFirstSet(T x) {
-  // Note that conversion from an unsigned type to the corresponding signed
+  // Note that conversion from a signed type to the corresponding unsigned
   // type is technically implementation-defined, but will likely work
   // on any impementation that uses two's complement.
-  return findFirstSet(static_cast<typename std::make_signed<T>::type>(x));
+  return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
 }

 // findLastSet: return the 1-based index of the highest bit set
 // for x > 0, findLastSet(x) == 1 + floor(log2(x))
 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
    std::is_unsigned<T>::value &&
-   (std::numeric_limits<T>::digits <=
-    std::numeric_limits<unsigned int>::digits)),
+   sizeof(T) <= sizeof(unsigned int)),
   unsigned int>::type
   findLastSet(T x) {
-  return x ? 8 * sizeof(unsigned int) - __builtin_clz(x) : 0;
+  // If X is a power of two X - Y = ((X - 1) ^ Y) + 1. Doing this transformation
+  // allows GCC to remove its own xor that it adds to implement clz using bsr
+  return x ? ((8 * sizeof(unsigned int) - 1) ^ __builtin_clz(x)) + 1 : 0;
 }

 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
    std::is_unsigned<T>::value &&
-   (std::numeric_limits<T>::digits >
-    std::numeric_limits<unsigned int>::digits) &&
-   (std::numeric_limits<T>::digits <=
-    std::numeric_limits<unsigned long>::digits)),
+   sizeof(T) > sizeof(unsigned int) &&
+   sizeof(T) <= sizeof(unsigned long)),
   unsigned int>::type
   findLastSet(T x) {
-  return x ? 8 * sizeof(unsigned long) - __builtin_clzl(x) : 0;
+  return x ? ((8 * sizeof(unsigned long) - 1) ^ __builtin_clzl(x)) + 1 : 0;
 }

 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
    std::is_unsigned<T>::value &&
-   (std::numeric_limits<T>::digits >
-    std::numeric_limits<unsigned long>::digits) &&
-   (std::numeric_limits<T>::digits <=
-    std::numeric_limits<unsigned long long>::digits)),
+   sizeof(T) > sizeof(unsigned long) &&
+   sizeof(T) <= sizeof(unsigned long long)),
   unsigned int>::type
   findLastSet(T x) {
-  return x ? 8 * sizeof(unsigned long long) - __builtin_clzll(x) : 0;
+  return x ? ((8 * sizeof(unsigned long long) - 1) ^ __builtin_clzll(x)) + 1
+           : 0;
 }

 template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   (std::is_integral<T>::value &&
    std::is_signed<T>::value),
@@ -182,15 +184,27 @@ typename std::enable_if<
 }

 template <class T>
-inline
+inline FOLLY_INTRINSIC_CONSTEXPR
 typename std::enable_if<
   std::is_integral<T>::value && std::is_unsigned<T>::value,
   T>::type
 nextPowTwo(T v) {
-  if (UNLIKELY(v == 0)) {
-    return 1;
-  }
-  return 1ul << findLastSet(v - 1);
+  return v ? (T(1) << findLastSet(v - 1)) : 1;
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR typename std::
+    enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value, T>::type
+    prevPowTwo(T v) {
+  return v ? (T(1) << (findLastSet(v) - 1)) : 0;
+}
+
+template <class T>
+inline constexpr typename std::enable_if<
+    std::is_integral<T>::value && std::is_unsigned<T>::value,
+    bool>::type
+isPowTwo(T v) {
+  return (v != 0) && !(v & (v - 1));
 }

 /**
@@ -228,44 +242,67 @@ struct EndianIntBase {
   static T swap(T x);
 };

-#define FB_GEN(t, fn) \
-template<> inline t EndianIntBase<t>::swap(t x) { return fn(x); }
+#ifndef _MSC_VER
+
+/**
+ * If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide our
+ * own definition.
+ */
+#ifdef bswap_16
+# define our_bswap16 bswap_16
+#else
+
+template <class Int16>
+inline constexpr typename std::enable_if<
+  sizeof(Int16) == 2,
+  Int16>::type
+our_bswap16(Int16 x) {
+  return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
+}
+#endif
+
+#endif
+
+#define FB_GEN(t, fn)                             \
+  template <>                                     \
+  inline t EndianIntBase<t>::swap(t x) {          \
+    return t(fn(std::make_unsigned<t>::type(x))); \
+  }

 // fn(x) expands to (x) if the second argument is empty, which is exactly
-// what we want for [u]int8_t
+// what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
+// __builtin_bswap16 for some reason, so we have to provide our own.
 FB_GEN( int8_t,)
 FB_GEN(uint8_t,)
-FB_GEN( int64_t, bswap_64)
-FB_GEN(uint64_t, bswap_64)
-FB_GEN( int32_t, bswap_32)
-FB_GEN(uint32_t, bswap_32)
-FB_GEN( int16_t, bswap_16)
-FB_GEN(uint16_t, bswap_16)
+#ifdef _MSC_VER
+FB_GEN( int64_t, _byteswap_uint64)
+FB_GEN(uint64_t, _byteswap_uint64)
+FB_GEN( int32_t, _byteswap_ulong)
+FB_GEN(uint32_t, _byteswap_ulong)
+FB_GEN( int16_t, _byteswap_ushort)
+FB_GEN(uint16_t, _byteswap_ushort)
+#else
+FB_GEN( int64_t, __builtin_bswap64)
+FB_GEN(uint64_t, __builtin_bswap64)
+FB_GEN( int32_t, __builtin_bswap32)
+FB_GEN(uint32_t, __builtin_bswap32)
+FB_GEN( int16_t, our_bswap16)
+FB_GEN(uint16_t, our_bswap16)
+#endif

 #undef FB_GEN

-#if __BYTE_ORDER == __LITTLE_ENDIAN
-
 template <class T>
-struct EndianInt : public detail::EndianIntBase<T> {
+struct EndianInt : public EndianIntBase<T> {
  public:
-  static T big(T x) { return EndianInt::swap(x); }
-  static T little(T x) { return x; }
-};
-
-#elif __BYTE_ORDER == __BIG_ENDIAN
-
-template <class T>
-struct EndianInt : public detail::EndianIntBase<T> {
- public:
-  static T big(T x) { return x; }
-  static T little(T x) { return EndianInt::swap(x); }
+  static T big(T x) {
+    return kIsLittleEndian ? EndianInt::swap(x) : x;
+  }
+  static T little(T x) {
+    return kIsBigEndian ? EndianInt::swap(x) : x;
+  }
 };

-#else
-# error Your machine uses a weird endianness!
-#endif /* __BYTE_ORDER */
-
 } // namespace detail

 // big* convert between native and big-endian representations
@@ -293,29 +330,24 @@ class Endian {
     BIG
   };

-  static constexpr Order order =
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-    Order::LITTLE;
-#elif __BYTE_ORDER == __BIG_ENDIAN
-    Order::BIG;
-#else
-# error Your machine uses a weird endianness!
-#endif /* __BYTE_ORDER */
+  static constexpr Order order = kIsLittleEndian ? Order::LITTLE : Order::BIG;

   template <class T> static T swap(T x) {
-    return detail::EndianInt<T>::swap(x);
+    return folly::detail::EndianInt<T>::swap(x);
   }
   template <class T> static T big(T x) {
-    return detail::EndianInt<T>::big(x);
+    return folly::detail::EndianInt<T>::big(x);
   }
   template <class T> static T little(T x) {
-    return detail::EndianInt<T>::little(x);
+    return folly::detail::EndianInt<T>::little(x);
   }

+#if !defined(__ANDROID__)
   FB_GEN(64)
   FB_GEN(32)
   FB_GEN(16)
   FB_GEN(8)
+#endif
 };

 #undef FB_GEN
@@ -344,7 +376,7 @@ class BitIterator
   /**
    * Return the number of bits in an element of the underlying iterator.
    */
-  static size_t bitsPerBlock() {
+  static unsigned int bitsPerBlock() {
     return std::numeric_limits<
       typename std::make_unsigned<
         typename std::iterator_traits<BaseIter>::value_type
@@ -356,9 +388,9 @@ class BitIterator
    * Construct a BitIterator that points at a given bit offset (default 0)
    * in iter.
    */
-  explicit BitIterator(const BaseIter& iter, size_t bitOffset=0)
+  explicit BitIterator(const BaseIter& iter, size_t bitOff=0)
     : bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
-      bitOffset_(bitOffset) {
+      bitOffset_(bitOff) {
     assert(bitOffset_ < bitsPerBlock());
   }

@@ -427,10 +459,10 @@ class BitIterator
   ssize_t distance_to(const BitIterator& other) const {
     return
       (other.base_reference() - this->base_reference()) * bitsPerBlock() +
-      (other.bitOffset_ - bitOffset_);
+      other.bitOffset_ - bitOffset_;
   }

-  ssize_t bitOffset_;
+  size_t bitOffset_;
 };

 /**
@@ -492,14 +524,16 @@ template <class T, class Enable=void> struct Unaligned;

 /**
  * Representation of an unaligned value of a POD type.
  */
+FOLLY_PACK_PUSH
 template <class T>
 struct Unaligned<
     T,
     typename std::enable_if<std::is_pod<T>::value>::type> {
-  Unaligned() { }  // uninitialized
+  Unaligned() = default;  // uninitialized
   /* implicit */ Unaligned(T v) : value(v) { }
   T value;
-} __attribute__((packed));
+} FOLLY_PACK_ATTR;
+FOLLY_PACK_POP

 /**
  * Read an unaligned value of type T and return it.
  */
@@ -508,7 +542,13 @@ template <class T>
 inline T loadUnaligned(const void* p) {
   static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
   static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
-  return static_cast<const Unaligned<T>*>(p)->value;
+  if (kHasUnalignedAccess) {
+    return static_cast<const Unaligned<T>*>(p)->value;
+  } else {
+    T value;
+    memcpy(&value, p, sizeof(T));
+    return value;
+  }
 }

 /**
  * Write an unaligned value of type T.
  */
@@ -518,10 +558,17 @@ template <class T>
 inline void storeUnaligned(void* p, T value) {
   static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
   static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
-  new (p) Unaligned<T>(value);
+  if (kHasUnalignedAccess) {
+    // Prior to C++14, the spec says that a placement new like this
+    // is required to check that p is not nullptr, and to do nothing
+    // if p is a nullptr. By assuming it's not a nullptr, we get a
+    // nice loud segfault in optimized builds if p is nullptr, rather
+    // than just silently doing nothing.
+    folly::assume(p != nullptr);
+    new (p) Unaligned<T>(value);
+  } else {
+    memcpy(p, &value, sizeof(T));
+  }
 }

 } // namespace folly
-
-#endif /* FOLLY_BITS_H_ */
-
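
Usage note (not part of the patch): a minimal sketch of how the routines documented in the header comment above behave. It assumes a program built against folly at roughly this revision, with the header included as <folly/Bits.h>; the values in the comments simply follow the documented semantics (1-based bit indices, nextPowTwo(x) >= x, Endian::big/little converting between native and big-/little-endian, loadUnaligned/storeUnaligned for possibly misaligned addresses).

    #include <cassert>
    #include <cstdint>

    #include <folly/Bits.h>

    int main() {
      // findFirstSet / findLastSet are 1-based; both return 0 when x == 0.
      assert(folly::findFirstSet(0u) == 0);
      assert(folly::findFirstSet(12u) == 3);  // 12 = 0b1100, lowest set bit is bit 3
      assert(folly::findLastSet(12u) == 4);   // 1 + floor(log2(12)) == 4

      // nextPowTwo rounds up to a power of two; isPowTwo (added by this patch) tests for one.
      assert(folly::nextPowTwo(12u) == 16u);
      assert(folly::isPowTwo(16u) && !folly::isPowTwo(12u));

      // Endian::big converts native <-> big-endian; applying it twice round-trips.
      const std::uint32_t x = 0x01020304;
      assert(folly::Endian::big(folly::Endian::big(x)) == x);

      // loadUnaligned/storeUnaligned read and write a T at a possibly
      // misaligned address (buf + 1).
      unsigned char buf[sizeof(std::uint32_t) + 1] = {};
      folly::storeUnaligned<std::uint32_t>(buf + 1, x);
      assert(folly::loadUnaligned<std::uint32_t>(buf + 1) == x);
      return 0;
    }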