2 * Copyright 2013 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Various low-level, bit-manipulation routines.
20 * findFirstSet(x) [constexpr]
21 * find first (least significant) bit set in a value of an integral type,
22 * 1-based (like ffs()). 0 = no bits are set (x == 0)
24 * findLastSet(x) [constexpr]
25 * find last (most significant) bit set in a value of an integral type,
26 * 1-based. 0 = no bits are set (x == 0)
27 * for x != 0, findLastSet(x) == 1 + floor(log2(x))
29 * nextPowTwo(x) [constexpr]
30 * Finds the next power of two >= x.
32 * isPowTwo(x) [constexpr]
33 * return true iff x is a power of two
36 * return the number of 1 bits in x
39 * convert between native, big, and little endian representation
40 * Endian::big(x) big <-> native
41 * Endian::little(x) little <-> native
42 * Endian::swap(x) big <-> little
45 * Wrapper around an iterator over an integral type that iterates
46 * over its underlying bits in MSb to LSb order
48 * findFirstSet(BitIterator begin, BitIterator end)
49 * return a BitIterator pointing to the first 1 bit in [begin, end), or
50 * end if all bits in [begin, end) are 0
52 * @author Tudor Bosman (tudorb@fb.com)
58 #include "folly/Portability.h"
64 #ifndef FOLLY_NO_CONFIG
65 #include "folly/folly-config.h"
68 #include "folly/detail/BitsDetail.h"
69 #include "folly/detail/BitIteratorDetail.h"
70 #include "folly/Likely.h"
72 #if FOLLY_HAVE_BYTESWAP_H
73 # include <byteswap.h>
80 #include <type_traits>
81 #include <boost/iterator/iterator_adaptor.hpp>
86 // Generate overloads for findFirstSet as wrappers around
87 // appropriate ffs, ffsl, ffsll gcc builtins
// findFirstSet(x): 1-based index of the least significant set bit,
// 0 when x == 0 (same contract as ffs(); see the file header comment).
// The unsigned overloads are selected via enable_if on sizeof(T) so that
// exactly one builtin-width variant participates in overload resolution
// for any given unsigned integral type.
//
// Variant for unsigned types no wider than unsigned int:
90 typename std::enable_if<
91 (std::is_integral<T>::value &&
92 std::is_unsigned<T>::value &&
93 sizeof(T) <= sizeof(unsigned int)),
96 return __builtin_ffs(x);
// Variant for unsigned types wider than unsigned int, up to unsigned long:
101 typename std::enable_if<
102 (std::is_integral<T>::value &&
103 std::is_unsigned<T>::value &&
104 sizeof(T) > sizeof(unsigned int) &&
105 sizeof(T) <= sizeof(unsigned long)),
108 return __builtin_ffsl(x);
// Variant for unsigned types wider than unsigned long, up to
// unsigned long long:
113 typename std::enable_if<
114 (std::is_integral<T>::value &&
115 std::is_unsigned<T>::value &&
116 sizeof(T) > sizeof(unsigned long) &&
117 sizeof(T) <= sizeof(unsigned long long)),
120 return __builtin_ffsll(x);
// Signed types delegate to the unsigned overload of the same width; the
// bit pattern (and thus the lowest set bit) is what matters here.
125 typename std::enable_if<
126 (std::is_integral<T>::value && std::is_signed<T>::value),
129 // Note that conversion from a signed type to the corresponding unsigned
130 // type is technically implementation-defined, but will likely work
131 // on any implementation that uses two's complement.
132 return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
135 // findLastSet: return the 1-based index of the highest bit set
136 // for x > 0, findLastSet(x) == 1 + floor(log2(x))
// Returns 0 when x == 0.  Each unsigned variant computes
// bit_width - __builtin_clz*(x); the ternary guard is required because
// the clz builtins are undefined for a zero argument.  Note the bit
// width is hard-coded as 8 * sizeof(...), i.e. 8-bit bytes are assumed.
//
// Variant for unsigned types no wider than unsigned int:
139 typename std::enable_if<
140 (std::is_integral<T>::value &&
141 std::is_unsigned<T>::value &&
142 sizeof(T) <= sizeof(unsigned int)),
145 return x ? 8 * sizeof(unsigned int) - __builtin_clz(x) : 0;
// Variant for unsigned types wider than unsigned int, up to unsigned long:
150 typename std::enable_if<
151 (std::is_integral<T>::value &&
152 std::is_unsigned<T>::value &&
153 sizeof(T) > sizeof(unsigned int) &&
154 sizeof(T) <= sizeof(unsigned long)),
157 return x ? 8 * sizeof(unsigned long) - __builtin_clzl(x) : 0;
// Variant for unsigned types wider than unsigned long, up to
// unsigned long long:
162 typename std::enable_if<
163 (std::is_integral<T>::value &&
164 std::is_unsigned<T>::value &&
165 sizeof(T) > sizeof(unsigned long) &&
166 sizeof(T) <= sizeof(unsigned long long)),
169 return x ? 8 * sizeof(unsigned long long) - __builtin_clzll(x) : 0;
// Signed types delegate to the unsigned overload of the same width
// (see the two's-complement note on findFirstSet above).
174 typename std::enable_if<
175 (std::is_integral<T>::value &&
176 std::is_signed<T>::value),
179 return findLastSet(static_cast<typename std::make_unsigned<T>::type>(x));
// nextPowTwo(v): round v up to the next power of two, i.e. the smallest
// power of two >= v (see the file header).  Returns v unchanged when v is
// already a power of two, and 1 when v == 0.
//
// Fix: the shift base must have the operand's own type T, not unsigned
// long.  With the previous `1ul << findLastSet(v - 1)`, on targets where
// unsigned long is 32 bits (e.g. LLP64 Windows) the shift is performed in
// 32-bit arithmetic, producing truncated/undefined results for 64-bit
// inputs above 2^32.  `T(1)` keeps the arithmetic at full width; behavior
// for all previously-correct inputs is unchanged.
// (Shifting past the top bit of T, i.e. v > T_max/2 + 1, remains
// undefined, exactly as before.)
template <class T>
inline constexpr typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  T>::type
nextPowTwo(T v) {
  return v ? (T(1) << findLastSet(v - 1)) : T(1);
}
// isPowTwo(v): true iff v is a power of two (see the file header).
193 typename std::enable_if<
194 std::is_integral<T>::value && std::is_unsigned<T>::value,
// v & (v - 1) clears the lowest set bit, so it is zero exactly when at
// most one bit is set; the v != 0 term excludes zero (which has no bits
// set and is not a power of two).
197 return (v != 0) && !(v & (v - 1));
// popCount(x): number of 1 bits in x (see the file header).  Dispatches
// on sizeof(T) to the int-width or long-long-width helper from
// folly/detail/BitsDetail.h.
//
// Variant for unsigned types no wider than unsigned int:
204 inline typename std::enable_if<
205 (std::is_integral<T>::value &&
206 std::is_unsigned<T>::value &&
207 sizeof(T) <= sizeof(unsigned int)),
210 return detail::popcount(x);
// Variant for wider unsigned types, up to unsigned long long:
214 inline typename std::enable_if<
215 (std::is_integral<T>::value &&
216 std::is_unsigned<T>::value &&
217 sizeof(T) > sizeof(unsigned int) &&
218 sizeof(T) <= sizeof(unsigned long long)),
221 return detail::popcountll(x);
225 * Endianness detection and manipulation primitives.
// Base template holding the per-type byte-swap operation; the FB_GEN
// specializations below supply swap() for each fixed-width integer type.
230 struct EndianIntBase {
236 * If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide our
240 # define our_bswap16 bswap_16
// Fallback 16-bit byte swap when byteswap.h's bswap_16 is unavailable:
// exchanges the high and low bytes of a 16-bit value.
243 template<class Int16>
244 inline constexpr typename std::enable_if<
247 our_bswap16(Int16 x) {
248 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
// Generates EndianIntBase<t>::swap specializations that forward to the
// given swap function/builtin.
252 #define FB_GEN(t, fn) \
253 template<> inline t EndianIntBase<t>::swap(t x) { return fn(x); }
255 // fn(x) expands to (x) if the second argument is empty, which is exactly
256 // what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
257 // __builtin_bswap16 for some reason, so we have to provide our own.
260 FB_GEN( int64_t, __builtin_bswap64)
261 FB_GEN(uint64_t, __builtin_bswap64)
262 FB_GEN( int32_t, __builtin_bswap32)
263 FB_GEN(uint32_t, __builtin_bswap32)
264 FB_GEN( int16_t, our_bswap16)
265 FB_GEN(uint16_t, our_bswap16)
// EndianInt<T> adds big()/little() conversions on top of EndianIntBase's
// swap().  On each native byte order, the matching conversion is the
// identity and the other one is a byte swap.  Selection uses gcc/clang's
// predefined __BYTE_ORDER__ macro.
269 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
// Little-endian host: big() swaps, little() is a no-op.
272 struct EndianInt : public detail::EndianIntBase<T> {
274 static T big(T x) { return EndianInt::swap(x); }
275 static T little(T x) { return x; }
278 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// Big-endian host: big() is a no-op, little() swaps.
281 struct EndianInt : public detail::EndianIntBase<T> {
283 static T big(T x) { return x; }
284 static T little(T x) { return EndianInt::swap(x); }
// Mixed/PDP endianness is deliberately unsupported.
288 # error Your machine uses a weird endianness!
289 #endif /* __BYTE_ORDER__ */
291 } // namespace detail
293 // big* convert between native and big-endian representations
294 // little* convert between native and little-endian representations
295 // swap* convert between big-endian and little-endian representations
297 // ntohs, htons == big16
298 // ntohl, htonl == big32
// FB_GEN1/FB_GEN2 stamp out the fixed-width convenience wrappers
// (swap16/big16/little16, ..., for both the signed and unsigned type of
// each size), each forwarding to the corresponding function template.
299 #define FB_GEN1(fn, t, sz) \
300 static t fn##sz(t x) { return fn<t>(x); } \
302 #define FB_GEN2(t, sz) \
303 FB_GEN1(swap, t, sz) \
304 FB_GEN1(big, t, sz) \
305 FB_GEN1(little, t, sz)
// Per-size expansion covering uintN_t and intN_t.
308 FB_GEN2(uint##sz##_t, sz) \
309 FB_GEN2(int##sz##_t, sz)
// Endian::order reports the compile-time native byte order.
313 enum class Order : uint8_t {
318 static constexpr Order order =
319 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
321 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
324 # error Your machine uses a weird endianness!
325 #endif /* __BYTE_ORDER__ */
// Generic entry points; all three forward to detail::EndianInt<T>, which
// resolves to identity or byte swap based on the host order (above).
327 template <class T> static T swap(T x) {
328 return detail::EndianInt<T>::swap(x);
330 template <class T> static T big(T x) {
331 return detail::EndianInt<T>::big(x);
333 template <class T> static T little(T x) {
334 return detail::EndianInt<T>::little(x);
348 * Fast bit iteration facility.
// Forward declarations: BitIterator must be declared before the
// findFirstSet(BitIterator, BitIterator) overload, and the overload must
// be declared before the class names it as a friend.
352 template <class BaseIter> class BitIterator;
353 template <class BaseIter>
354 BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter>,
355 BitIterator<BaseIter>);
357 * Wrapper around an iterator over an integer type that iterates
358 * over its underlying bits in LSb to MSb order.
360 * BitIterator models the same iterator concepts as the base iterator.
// Implemented on top of boost::iterator_adaptor (via
// bititerator_detail::BitIteratorBase): the base iterator addresses a
// block of bits, and bitOffset_ selects one bit within that block.
362 template <class BaseIter>
364 : public bititerator_detail::BitIteratorBase<BaseIter>::type {
367 * Return the number of bits in an element of the underlying iterator.
369 static size_t bitsPerBlock() {
370 return std::numeric_limits<
371 typename std::make_unsigned<
372 typename std::iterator_traits<BaseIter>::value_type
378 * Construct a BitIterator that points at a given bit offset (default 0)
381 explicit BitIterator(const BaseIter& iter, size_t bitOffset=0)
382 : bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
383 bitOffset_(bitOffset) {
// bitOffset must address a bit inside one block.
384 assert(bitOffset_ < bitsPerBlock());
387 size_t bitOffset() const {
// Jump to bit 0 of the next underlying block.
391 void advanceToNextBlock() {
393 ++this->base_reference();
// Rebind to a different base iterator; reuses the constructor via
// destroy + placement-new rather than duplicating member assignment.
396 BitIterator& operator=(const BaseIter& other) {
397 this->~BitIterator();
398 new (this) BitIterator(other);
// iterator_core_access lets boost invoke the private dereference/
// advance/equal/distance_to hooks below; findFirstSet needs access to
// advanceInBlock.
403 friend class boost::iterator_core_access;
404 friend BitIterator findFirstSet<>(BitIterator, BitIterator);
// Proxy reference type: dereferencing yields a BitReference that reads/
// writes a single bit of the underlying block.
406 typedef bititerator_detail::BitReference<
407 typename std::iterator_traits<BaseIter>::reference,
408 typename std::iterator_traits<BaseIter>::value_type
// Advance by n bits known to stay within the current block.
411 void advanceInBlock(size_t n) {
413 assert(bitOffset_ < bitsPerBlock());
416 BitRef dereference() const {
417 return BitRef(*this->base_reference(), bitOffset_);
// Random-access hook: move by n bits, crossing block boundaries as
// needed.
420 void advance(ssize_t n) {
421 size_t bpb = bitsPerBlock();
// NOTE(review): n (ssize_t) is divided by bpb (size_t), so the division
// is performed in unsigned arithmetic -- verify behavior for negative n.
422 ssize_t blocks = n / bpb;
423 bitOffset_ += n % bpb;
424 if (bitOffset_ >= bpb) {
428 this->base_reference() += blocks;
// increment hook: step to the next bit, wrapping into the next block.
432 if (++bitOffset_ == bitsPerBlock()) {
433 advanceToNextBlock();
// decrement hook: step to the previous bit, wrapping into the previous
// block (relies on unsigned wraparound of bitOffset_-- at 0).
438 if (bitOffset_-- == 0) {
439 bitOffset_ = bitsPerBlock() - 1;
440 --this->base_reference();
// Two BitIterators are equal iff they address the same bit of the same
// block.
444 bool equal(const BitIterator& other) const {
445 return (bitOffset_ == other.bitOffset_ &&
446 this->base_reference() == other.base_reference());
// Distance in bits: whole-block distance plus the offset difference.
449 ssize_t distance_to(const BitIterator& other) const {
451 (other.base_reference() - this->base_reference()) * bitsPerBlock() +
452 (other.bitOffset_ - bitOffset_);
459 * Helper function, so you can write
460 * auto bi = makeBitIterator(container.begin());
// Deduces BaseIter from the argument so callers need not spell out the
// BitIterator template argument; the iterator starts at bit offset 0.
462 template <class BaseIter>
463 BitIterator<BaseIter> makeBitIterator(const BaseIter& iter) {
464 return BitIterator<BaseIter>(iter);
469 * Find first bit set in a range of bit iterators.
470 * 4.5x faster than the obvious std::find(begin, end, true);
// Returns an iterator to the first 1 bit in [begin, end), or end if all
// bits are 0 (per the file header).  Strategy: scan whole blocks with the
// scalar findFirstSet overloads above, masking off the bits before
// begin's offset in the first block and (below) the bits at or after
// end's offset in the last block.
472 template <class BaseIter>
473 BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter> begin,
474 BitIterator<BaseIter> end) {
475 // shortcut to avoid ugly static_cast<>
476 static const typename BaseIter::value_type one = 1;
// Full-block phase: every block strictly before end's block.
478 while (begin.base() != end.base()) {
479 typename BaseIter::value_type v = *begin.base();
480 // mask out the bits that don't matter (< begin.bitOffset)
481 v &= ~((one << begin.bitOffset()) - 1);
// findFirstSet here is the scalar (integral) overload: 1-based, 0 if no
// bit is set in the masked block.
482 size_t firstSet = findFirstSet(v);
484 --firstSet; // now it's 0-based
485 assert(firstSet >= begin.bitOffset());
486 begin.advanceInBlock(firstSet - begin.bitOffset());
// No set bit in this block; continue with the next one.
489 begin.advanceToNextBlock();
492 // now begin points to the same block as end
493 if (end.bitOffset() != 0) { // assume end is dereferenceable
494 typename BaseIter::value_type v = *begin.base();
495 // mask out the bits that don't matter (< begin.bitOffset)
496 v &= ~((one << begin.bitOffset()) - 1);
497 // mask out the bits that don't matter (>= end.bitOffset)
498 v &= (one << end.bitOffset()) - 1;
499 size_t firstSet = findFirstSet(v);
501 --firstSet; // now it's 0-based
502 assert(firstSet >= begin.bitOffset());
503 begin.advanceInBlock(firstSet - begin.bitOffset());
// Primary template left undefined; only the POD specialization below is
// usable.
512 template <class T, class Enable=void> struct Unaligned;
515 * Representation of an unaligned value of a POD type.
// __attribute__((packed)) forces alignment 1, which is what makes the
// loadUnaligned/storeUnaligned accesses below legal for the compiler.
520 typename std::enable_if<std::is_pod<T>::value>::type> {
521 Unaligned() = default; // uninitialized
522 /* implicit */ Unaligned(T v) : value(v) { }
524 } __attribute__((packed));
527 * Read an unaligned value of type T and return it.
// p need not be aligned for T; the static_asserts confirm Unaligned<T>
// adds no padding and really has alignment 1.
530 inline T loadUnaligned(const void* p) {
531 static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
532 static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
533 return static_cast<const Unaligned<T>*>(p)->value;
537 * Write an unaligned value of type T.
// Placement-new constructs an Unaligned<T> (holding value) directly into
// the possibly-misaligned buffer at p.
540 inline void storeUnaligned(void* p, T value) {
541 static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
542 static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
543 new (p) Unaligned<T>(value);
548 #endif /* FOLLY_BITS_H_ */