/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Various low-level, bit-manipulation routines.
 *
 * findFirstSet(x)  [constexpr]
 *    find first (least significant) bit set in a value of an integral type,
 *    1-based (like ffs()).  0 = no bits are set (x == 0)
 *
 * findLastSet(x)  [constexpr]
 *    find last (most significant) bit set in a value of an integral type,
 *    1-based.  0 = no bits are set (x == 0)
 *    for x != 0, findLastSet(x) == 1 + floor(log2(x))
 *
 * nextPowTwo(x)  [constexpr]
 *    Finds the next power of two >= x.
 *
 * isPowTwo(x)  [constexpr]
 *    return true iff x is a power of two
 *
 * popcount(x)
 *    return the number of 1 bits in x
 *
 * Endian
 *    convert between native, big, and little endian representation
 *    Endian::big(x)      big <-> native
 *    Endian::little(x)   little <-> native
 *    Endian::swap(x)     big <-> little
 *
 * BitIterator
 *    Wrapper around an iterator over an integral type that iterates
 *    over its underlying bits in LSb to MSb order
 *
 * findFirstSet(BitIterator begin, BitIterator end)
 *    return a BitIterator pointing to the first 1 bit in [begin, end), or
 *    end if all bits in [begin, end) are 0
 *
 * @author Tudor Bosman (tudorb@fb.com)
 */

#pragma once
#if !defined(__clang__) && !(defined(_MSC_VER) && (_MSC_VER < 1900))
#define FOLLY_INTRINSIC_CONSTEXPR constexpr
#else
// GCC and MSVC 2015+ are the only compilers with
// intrinsics constexpr.
#define FOLLY_INTRINSIC_CONSTEXPR const
#endif
#include <folly/Portability.h>
#include <folly/portability/Builtins.h>

#include <folly/Assume.h>
#include <folly/detail/BitsDetail.h>
#include <folly/detail/BitIteratorDetail.h>
#include <folly/Likely.h>

#if FOLLY_HAVE_BYTESWAP_H
# include <byteswap.h>
#endif

#include <cassert>
#include <cinttypes>
#include <cstring>
#include <limits>
#include <type_traits>

#include <boost/iterator/iterator_adaptor.hpp>

namespace folly {
// Generate overloads for findFirstSet as wrappers around
// appropriate ffs, ffsl, ffsll gcc builtins
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  unsigned int>::type
  findFirstSet(T x) {
  return __builtin_ffs(x);
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long)),
  unsigned int>::type
  findFirstSet(T x) {
  return __builtin_ffsl(x);
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned long) &&
   sizeof(T) <= sizeof(unsigned long long)),
  unsigned int>::type
  findFirstSet(T x) {
  return __builtin_ffsll(x);
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value && std::is_signed<T>::value),
  unsigned int>::type
  findFirstSet(T x) {
  // Note that conversion from a signed type to the corresponding unsigned
  // type is technically implementation-defined, but will likely work
  // on any implementation that uses two's complement.
  return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
}
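
// Usage sketch (illustrative):
//   findFirstSet(8u)          == 4  // 8 == 0b1000: lowest set bit is bit 3, 1-based
//   findFirstSet(0u)          == 0  // no bits set
//   findFirstSet(int16_t(-1)) == 1  // signed value reinterpreted as 0xffff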

// findLastSet: return the 1-based index of the highest bit set
// for x > 0, findLastSet(x) == 1 + floor(log2(x))
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  unsigned int>::type
  findLastSet(T x) {
  return x ? 8 * sizeof(unsigned int) - __builtin_clz(x) : 0;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long)),
  unsigned int>::type
  findLastSet(T x) {
  return x ? 8 * sizeof(unsigned long) - __builtin_clzl(x) : 0;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned long) &&
   sizeof(T) <= sizeof(unsigned long long)),
  unsigned int>::type
  findLastSet(T x) {
  return x ? 8 * sizeof(unsigned long long) - __builtin_clzll(x) : 0;
}

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_signed<T>::value),
  unsigned int>::type
  findLastSet(T x) {
  return findLastSet(static_cast<typename std::make_unsigned<T>::type>(x));
}
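
// Usage sketch (illustrative):
//   findLastSet(10u) == 4  // 10 == 0b1010: 1 + floor(log2(10))
//   findLastSet(1u)  == 1
//   findLastSet(0u)  == 0  // no bits set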

template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  T>::type
nextPowTwo(T v) {
  return v ? (T(1) << findLastSet(v - 1)) : 1;
}
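
// Usage sketch (illustrative): rounds up to a power of two and is the
// identity on powers of two (note the v - 1 inside the shift):
//   nextPowTwo(5u) == 8
//   nextPowTwo(8u) == 8  // findLastSet(7u) == 3, so T(1) << 3
//   nextPowTwo(0u) == 1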

template <class T>
inline constexpr
typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  bool>::type
isPowTwo(T v) {
  return (v != 0) && !(v & (v - 1));
}
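
// Usage sketch (illustrative): a power of two has exactly one bit set, so
// clearing the lowest set bit via v & (v - 1) must leave zero:
//   isPowTwo(64u) == true   // 0b1000000 & 0b0111111 == 0
//   isPowTwo(48u) == false  // 0b110000 & 0b101111 == 0b100000
//   isPowTwo(0u)  == false  // guarded by the explicit v != 0 check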

/**
 * Population count
 */
template <class T>
inline typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  size_t>::type
  popcount(T x) {
  return detail::popcount(x);
}

template <class T>
inline typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long long)),
  size_t>::type
  popcount(T x) {
  return detail::popcountll(x);
}
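
// Usage sketch (illustrative):
//   popcount(0xF0u)             == 4  // four bits set
//   popcount(uint64_t(1) << 40) == 1  // dispatches to the popcountll overload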

/**
 * Endianness detection and manipulation primitives.
 */
namespace detail {

template <class T>
struct EndianIntBase {
 public:
  static T swap(T x);
};

/**
 * If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide our
 * own definition.
 */
#ifdef bswap_16
# define our_bswap16 bswap_16
#else

template<class Int16>
inline constexpr typename std::enable_if<
  sizeof(Int16) == 2,
  Int16>::type
our_bswap16(Int16 x) {
  return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
#endif
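
// Sketch of the fallback arithmetic (illustrative):
//   our_bswap16(uint16_t(0x1234)) == 0x3412
//   the high byte 0x12 shifts down 8 bits, the low byte 0x34 shifts up 8 bits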

#define FB_GEN(t, fn) \
template<> inline t EndianIntBase<t>::swap(t x) { return fn(x); }

// fn(x) expands to (x) if the second argument is empty, which is exactly
// what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
// __builtin_bswap16 for some reason, so we have to provide our own.
FB_GEN( int8_t,)
FB_GEN(uint8_t,)
#ifdef _MSC_VER
FB_GEN( int64_t, _byteswap_uint64)
FB_GEN(uint64_t, _byteswap_uint64)
FB_GEN( int32_t, _byteswap_ulong)
FB_GEN(uint32_t, _byteswap_ulong)
FB_GEN( int16_t, _byteswap_ushort)
FB_GEN(uint16_t, _byteswap_ushort)
#else
FB_GEN( int64_t, __builtin_bswap64)
FB_GEN(uint64_t, __builtin_bswap64)
FB_GEN( int32_t, __builtin_bswap32)
FB_GEN(uint32_t, __builtin_bswap32)
FB_GEN( int16_t, our_bswap16)
FB_GEN(uint16_t, our_bswap16)
#endif

#undef FB_GEN

template <class T>
struct EndianInt : public EndianIntBase<T> {
 public:
  static T big(T x) {
    return kIsLittleEndian ? EndianInt::swap(x) : x;
  }
  static T little(T x) {
    return kIsBigEndian ? EndianInt::swap(x) : x;
  }
};

}  // namespace detail

// big* convert between native and big-endian representations
// little* convert between native and little-endian representations
// swap* convert between big-endian and little-endian representations
//
// ntohs, htons == big16
// ntohl, htonl == big32

#define FB_GEN1(fn, t, sz) \
  static t fn##sz(t x) { return fn<t>(x); } \

#define FB_GEN2(t, sz) \
  FB_GEN1(swap, t, sz) \
  FB_GEN1(big, t, sz) \
  FB_GEN1(little, t, sz)

#define FB_GEN(sz) \
  FB_GEN2(uint##sz##_t, sz) \
  FB_GEN2(int##sz##_t, sz)

class Endian {
 public:
  enum class Order : uint8_t {
    LITTLE,
    BIG
  };

  static constexpr Order order = kIsLittleEndian ? Order::LITTLE : Order::BIG;

  template <class T> static T swap(T x) {
    return folly::detail::EndianInt<T>::swap(x);
  }
  template <class T> static T big(T x) {
    return folly::detail::EndianInt<T>::big(x);
  }
  template <class T> static T little(T x) {
    return folly::detail::EndianInt<T>::little(x);
  }

#if !defined(__ANDROID__)
  FB_GEN(64)
  FB_GEN(32)
  FB_GEN(16)
  FB_GEN(8)
#endif
};

#undef FB_GEN
#undef FB_GEN1
#undef FB_GEN2
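
// Usage sketch (illustrative, assuming a little-endian host such as x86):
//   uint32_t x = 0x01020304;
//   Endian::big(x)    == 0x04030201  // native -> big-endian requires a swap
//   Endian::little(x) == 0x01020304  // native is already little-endian: no-op
//   Endian::big32(x)  // same as Endian::big<uint32_t>(x); cf. htonl/ntohl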

/**
 * Fast bit iteration facility.
 */

template <class BaseIter> class BitIterator;
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter>,
                                   BitIterator<BaseIter>);
/**
 * Wrapper around an iterator over an integer type that iterates
 * over its underlying bits in LSb to MSb order.
 *
 * BitIterator models the same iterator concepts as the base iterator.
 */
template <class BaseIter>
class BitIterator
  : public bititerator_detail::BitIteratorBase<BaseIter>::type {
 public:
  /**
   * Return the number of bits in an element of the underlying iterator.
   */
  static unsigned int bitsPerBlock() {
    return std::numeric_limits<
      typename std::make_unsigned<
        typename std::iterator_traits<BaseIter>::value_type
      >::type
    >::digits;
  }

  /**
   * Construct a BitIterator that points at a given bit offset (default 0)
   * in iter.
   */
  explicit BitIterator(const BaseIter& iter, size_t bitOff=0)
    : bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
      bitOffset_(bitOff) {
    assert(bitOffset_ < bitsPerBlock());
  }

  size_t bitOffset() const {
    return bitOffset_;
  }

  void advanceToNextBlock() {
    bitOffset_ = 0;
    ++this->base_reference();
  }

  BitIterator& operator=(const BaseIter& other) {
    this->~BitIterator();
    new (this) BitIterator(other);
    return *this;
  }

 private:
  friend class boost::iterator_core_access;
  friend BitIterator findFirstSet<>(BitIterator, BitIterator);

  typedef bititerator_detail::BitReference<
      typename std::iterator_traits<BaseIter>::reference,
      typename std::iterator_traits<BaseIter>::value_type
    > BitRef;

  void advanceInBlock(size_t n) {
    bitOffset_ += n;
    assert(bitOffset_ < bitsPerBlock());
  }

  BitRef dereference() const {
    return BitRef(*this->base_reference(), bitOffset_);
  }

  void advance(ssize_t n) {
    size_t bpb = bitsPerBlock();
    ssize_t blocks = n / bpb;
    bitOffset_ += n % bpb;
    if (bitOffset_ >= bpb) {
      bitOffset_ -= bpb;
      ++blocks;
    }
    this->base_reference() += blocks;
  }

  void increment() {
    if (++bitOffset_ == bitsPerBlock()) {
      advanceToNextBlock();
    }
  }

  void decrement() {
    if (bitOffset_-- == 0) {
      bitOffset_ = bitsPerBlock() - 1;
      --this->base_reference();
    }
  }

  bool equal(const BitIterator& other) const {
    return (bitOffset_ == other.bitOffset_ &&
            this->base_reference() == other.base_reference());
  }

  ssize_t distance_to(const BitIterator& other) const {
    return
      (other.base_reference() - this->base_reference()) * bitsPerBlock() +
      other.bitOffset_ - bitOffset_;
  }

  unsigned int bitOffset_;
};

/**
 * Helper function, so you can write
 * auto bi = makeBitIterator(container.begin());
 */
template <class BaseIter>
BitIterator<BaseIter> makeBitIterator(const BaseIter& iter) {
  return BitIterator<BaseIter>(iter);
}
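
// Usage sketch (illustrative; assumes <vector> and <cstdint> are available):
//   std::vector<uint32_t> data = {0x04};  // only bit 2 set
//   auto bi = makeBitIterator(data.begin());
//   bool b0 = *bi;        // false: bit 0 of data[0]
//   bool b2 = *(bi + 2);  // true: bits are visited LSb first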

/**
 * Find first bit set in a range of bit iterators.
 * 4.5x faster than the obvious std::find(begin, end, true);
 */
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter> begin,
                                   BitIterator<BaseIter> end) {
  // shortcut to avoid ugly static_cast<>
  static const typename BaseIter::value_type one = 1;

  while (begin.base() != end.base()) {
    typename BaseIter::value_type v = *begin.base();
    // mask out the bits that don't matter (< begin.bitOffset)
    v &= ~((one << begin.bitOffset()) - 1);
    size_t firstSet = findFirstSet(v);
    if (firstSet) {
      --firstSet;  // now it's 0-based
      assert(firstSet >= begin.bitOffset());
      begin.advanceInBlock(firstSet - begin.bitOffset());
      return begin;
    }
    begin.advanceToNextBlock();
  }

  // now begin points to the same block as end
  if (end.bitOffset() != 0) {  // assume end is dereferenceable
    typename BaseIter::value_type v = *begin.base();
    // mask out the bits that don't matter (< begin.bitOffset)
    v &= ~((one << begin.bitOffset()) - 1);
    // mask out the bits that don't matter (>= end.bitOffset)
    v &= (one << end.bitOffset()) - 1;
    size_t firstSet = findFirstSet(v);
    if (firstSet) {
      --firstSet;  // now it's 0-based
      assert(firstSet >= begin.bitOffset());
      begin.advanceInBlock(firstSet - begin.bitOffset());
      return begin;
    }
  }

  return end;
}
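
// Usage sketch (illustrative; assumes <vector> is available):
//   std::vector<uint32_t> data = {0x0, 0x10};  // first set bit: bit 4 of block 1
//   auto b = makeBitIterator(data.begin());
//   auto e = makeBitIterator(data.end());
//   auto it = findFirstSet(b, e);  // it - b == 36 (32 bits/block + offset 4)
//   // if no bit were set, it would equal e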

template <class T, class Enable=void> struct Unaligned;

/**
 * Representation of an unaligned value of a POD type.
 */
FOLLY_PACK_PUSH
template <class T>
struct Unaligned<
    T,
    typename std::enable_if<std::is_pod<T>::value>::type> {
  Unaligned() = default;  // uninitialized
  /* implicit */ Unaligned(T v) : value(v) { }
  T value;
} FOLLY_PACK_ATTR;
FOLLY_PACK_POP

/**
 * Read an unaligned value of type T and return it.
 */
template <class T>
inline T loadUnaligned(const void* p) {
  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
  if (kHasUnalignedAccess) {
    return static_cast<const Unaligned<T>*>(p)->value;
  } else {
    T value;
    memcpy(&value, p, sizeof(T));
    return value;
  }
}
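
// Usage sketch (illustrative) round-tripping a value through a misaligned
// address, using storeUnaligned as defined below:
//   char buf[8] = {};
//   storeUnaligned<uint32_t>(buf + 1, 0xdeadbeef);  // buf + 1 isn't 4-byte aligned
//   uint32_t v = loadUnaligned<uint32_t>(buf + 1);  // v == 0xdeadbeef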

/**
 * Write an unaligned value of type T.
 */
template <class T>
inline void storeUnaligned(void* p, T value) {
  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
  if (kHasUnalignedAccess) {
    // Prior to C++14, the spec says that a placement new like this
    // is required to check that p is not nullptr, and to do nothing
    // if p is a nullptr. By assuming it's not a nullptr, we get a
    // nice loud segfault in optimized builds if p is nullptr, rather
    // than just silently doing nothing.
    folly::assume(p != nullptr);
    new (p) Unaligned<T>(value);
  } else {
    memcpy(p, &value, sizeof(T));
  }
}

}  // namespace folly