/*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2015 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#error EliasFanoCoding.h requires GCC
#endif
-#if !defined(__x86_64__)
+#if !FOLLY_X64
#error EliasFanoCoding.h requires x86_64
#endif
-#include <algorithm>
#include <cstdlib>
#include <limits>
#include <type_traits>
#include <boost/noncopyable.hpp>
#include <glog/logging.h>
-#include "folly/Bits.h"
-#include "folly/CpuId.h"
-#include "folly/Likely.h"
-#include "folly/Range.h"
+#include <folly/Bits.h>
+#include <folly/CpuId.h>
+#include <folly/Likely.h>
+#include <folly/Range.h>
+#include <folly/experimental/Select64.h>
#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
#error EliasFanoCoding.h requires little endianness
namespace folly { namespace compression {
-struct EliasFanoCompressedList {
- EliasFanoCompressedList()
- : size(0), numLowerBits(0) { }
+template <class Pointer>
+struct EliasFanoCompressedListBase {
+ EliasFanoCompressedListBase() = default;
+
+ template <class OtherPointer>
+ EliasFanoCompressedListBase(
+ const EliasFanoCompressedListBase<OtherPointer>& other)
+ : size(other.size),
+ numLowerBits(other.numLowerBits),
+ data(other.data),
+ skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
+ forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
+ lower(reinterpret_cast<Pointer>(other.lower)),
+ upper(reinterpret_cast<Pointer>(other.upper)) {
+ }
void free() {
- ::free(const_cast<unsigned char*>(lower.data()));
- ::free(const_cast<unsigned char*>(upper.data()));
- ::free(const_cast<unsigned char*>(skipPointers.data()));
- ::free(const_cast<unsigned char*>(forwardPointers.data()));
+ ::free(const_cast<unsigned char*>(data.data()));
+ }
+
+ size_t upperSize() const {
+ return data.end() - upper;
}
- size_t size;
- uint8_t numLowerBits;
+ size_t size = 0;
+ uint8_t numLowerBits = 0;
- // WARNING: EliasFanoCompressedList has no ownership of
- // lower, upper, skipPointers and forwardPointers.
- // The 7 bytes following the last byte of lower and upper
- // sequences should be readable.
- folly::ByteRange lower;
- folly::ByteRange upper;
+ // WARNING: EliasFanoCompressedList has no ownership of data. The 7
+ // bytes following the last byte should be readable.
+ folly::Range<Pointer> data;
- folly::ByteRange skipPointers;
- folly::ByteRange forwardPointers;
+ Pointer skipPointers = nullptr;
+ Pointer forwardPointers = nullptr;
+ Pointer lower = nullptr;
+ Pointer upper = nullptr;
};
-// Version history:
-// In version 1 skip / forward pointers encoding has been changed,
-// so SkipValue = uint32_t is able to address up to ~4B elements,
-// instead of only ~2B.
+typedef EliasFanoCompressedListBase<const uint8_t*> EliasFanoCompressedList;
+typedef EliasFanoCompressedListBase<uint8_t*> MutableEliasFanoCompressedList;
+
template <class Value,
class SkipValue = size_t,
size_t kSkipQuantum = 0, // 0 = disabled
- size_t kForwardQuantum = 0, // 0 = disabled
- size_t kVersion = 0>
-struct EliasFanoEncoder {
+ size_t kForwardQuantum = 0> // 0 = disabled
+struct EliasFanoEncoderV2 {
static_assert(std::is_integral<Value>::value &&
std::is_unsigned<Value>::value,
"Value should be unsigned integral");
typedef Value ValueType;
typedef SkipValue SkipValueType;
+ struct Layout;
static constexpr size_t skipQuantum = kSkipQuantum;
static constexpr size_t forwardQuantum = kForwardQuantum;
- static constexpr size_t version = kVersion;
static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
if (size == 0 || upperBound < size) {
return folly::findLastSet(upperBound / size) - 1;
}
+ // Requires: input range (begin, end) is sorted (encoding
+ // crashes if it's not).
// WARNING: encode() mallocates lower, upper, skipPointers
// and forwardPointers. As EliasFanoCompressedList has
// no ownership of them, you need to call free() explicitly.
- static void encode(const ValueType* list, size_t size,
- EliasFanoCompressedList& result) {
- encode(list, list + size, result);
+ template <class RandomAccessIterator>
+ static EliasFanoCompressedList encode(RandomAccessIterator begin,
+ RandomAccessIterator end) {
+ if (begin == end) {
+ return EliasFanoCompressedList();
+ }
+ EliasFanoEncoderV2 encoder(end - begin, *(end - 1));
+ for (; begin != end; ++begin) {
+ encoder.add(*begin);
+ }
+ return encoder.finish();
}
- // Range (begin, end) should be sorted.
- template <class RandomAccessIterator>
- static void encode(RandomAccessIterator begin,
- RandomAccessIterator end,
- EliasFanoCompressedList& result) {
- CHECK(std::is_sorted(begin, end));
+ explicit EliasFanoEncoderV2(const MutableEliasFanoCompressedList& result)
+ : lower_(result.lower),
+ upper_(result.upper),
+ skipPointers_(reinterpret_cast<SkipValueType*>(
+ result.skipPointers)),
+ forwardPointers_(reinterpret_cast<SkipValueType*>(
+ result.forwardPointers)),
+ result_(result) {
+ }
- auto list = begin;
- const size_t size = end - begin;
+ EliasFanoEncoderV2(size_t size, ValueType upperBound)
+ : EliasFanoEncoderV2(
+ Layout::fromUpperBoundAndSize(upperBound, size).allocList()) {
+ }
- if (size == 0) {
- result = EliasFanoCompressedList();
- return;
- }
+ void add(ValueType value) {
+ CHECK_GE(value, lastValue_);
- const ValueType upperBound = list[size - 1];
- uint8_t numLowerBits = defaultNumLowerBits(upperBound, size);
+ const auto numLowerBits = result_.numLowerBits;
+ const ValueType upperBits = value >> numLowerBits;
- // This is detail::writeBits56 limitation.
- numLowerBits = std::min<uint8_t>(numLowerBits, 56);
- CHECK_LT(numLowerBits, 8 * sizeof(Value)); // As we shift by numLowerBits.
+ // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
+ const size_t pos = upperBits + size_;
+ upper_[pos / 8] |= 1U << (pos % 8);
+ // Append numLowerBits bits to lower sequence.
+ if (numLowerBits != 0) {
+ const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
+ writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
+ }
- // WARNING: Current read/write logic assumes that the 7 bytes
- // following the last byte of lower and upper sequences are
- // readable (stored value doesn't matter and won't be changed),
- // so we allocate additional 7B, but do not include them in size
- // of returned value.
+ /* static */ if (skipQuantum != 0) {
+ while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
+ // Store the number of preceding 1-bits.
+ skipPointers_[skipPointersSize_++] = size_;
+ }
+ }
- // *** Lower bits.
- const size_t lowerSize = (numLowerBits * size + 7) / 8;
- unsigned char* lower = nullptr;
- if (lowerSize > 0) { // numLowerBits != 0
- lower = static_cast<unsigned char*>(calloc(lowerSize + 7, 1));
- const ValueType lowerMask = (ValueType(1) << numLowerBits) - 1;
- for (size_t i = 0; i < size; ++i) {
- const ValueType lowerBits = list[i] & lowerMask;
- writeBits56(lower, i * numLowerBits, numLowerBits, lowerBits);
+ /* static */ if (forwardQuantum != 0) {
+ if ((size_ + 1) % forwardQuantum == 0) {
+ const auto pos = size_ / forwardQuantum;
+ // Store the number of preceding 0-bits.
+ forwardPointers_[pos] = upperBits;
}
}
+ lastValue_ = value;
+ ++size_;
+ }
+
+ const EliasFanoCompressedList& finish() const {
+ CHECK_EQ(size_, result_.size);
+ return result_;
+ }
+
+ private:
+ // Writes value (with len up to 56 bits) to data starting at pos-th bit.
+ static void writeBits56(unsigned char* data, size_t pos,
+ uint8_t len, uint64_t value) {
+ DCHECK_LE(uint32_t(len), 56);
+ DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
+ unsigned char* const ptr = data + (pos / 8);
+ uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
+ ptrv |= value << (pos % 8);
+ folly::storeUnaligned<uint64_t>(ptr, ptrv);
+ }
+
+ unsigned char* lower_ = nullptr;
+ unsigned char* upper_ = nullptr;
+ SkipValueType* skipPointers_ = nullptr;
+ SkipValueType* forwardPointers_ = nullptr;
+
+ ValueType lastValue_ = 0;
+ size_t size_ = 0;
+ size_t skipPointersSize_ = 0;
+
+ EliasFanoCompressedList result_;
+};
+
+template <class Value,
+ class SkipValue,
+ size_t kSkipQuantum,
+ size_t kForwardQuantum>
+struct EliasFanoEncoderV2<Value,
+ SkipValue,
+ kSkipQuantum,
+ kForwardQuantum>::Layout {
+ static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
+ // numLowerBits can be at most 56 because of detail::writeBits56.
+ const uint8_t numLowerBits = std::min(defaultNumLowerBits(upperBound,
+ size),
+ uint8_t(56));
// *** Upper bits.
// Upper bits are stored using unary delta encoding.
// For example, (3 5 5 9) will be encoded as 1000011001000_2.
const size_t upperSizeBits =
(upperBound >> numLowerBits) + // Number of 0-bits to be stored.
size; // 1-bits.
- const size_t upperSize = (upperSizeBits + 7) / 8;
- unsigned char* const upper =
- static_cast<unsigned char*>(calloc(upperSize + 7, 1));
- for (size_t i = 0; i < size; ++i) {
- const ValueType upperBits = list[i] >> numLowerBits;
- const size_t pos = upperBits + i; // upperBits 0-bits and (i + 1) 1-bits.
- upper[pos / 8] |= 1U << (pos % 8);
- }
+ const size_t upper = (upperSizeBits + 7) / 8;
+
+ // *** Validity checks.
+ // Shift by numLowerBits must be valid.
+ CHECK_LT(numLowerBits, 8 * sizeof(Value));
+ CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
+ CHECK_LT(upperBound >> numLowerBits,
+ std::numeric_limits<SkipValueType>::max());
+
+ return fromInternalSizes(numLowerBits, upper, size);
+ }
+
+ static Layout fromInternalSizes(uint8_t numLowerBits,
+ size_t upper,
+ size_t size) {
+ Layout layout;
+ layout.size = size;
+ layout.numLowerBits = numLowerBits;
+
+ layout.lower = (numLowerBits * size + 7) / 8;
+ layout.upper = upper;
// *** Skip pointers.
// Store (1-indexed) position of every skipQuantum-th
// 0-bit in upper bits sequence.
- SkipValueType* skipPointers = nullptr;
- size_t numSkipPointers = 0;
/* static */ if (skipQuantum != 0) {
- // Workaround to avoid 'division by zero' compile-time error.
- constexpr size_t q = skipQuantum ?: 1;
- /* static */ if (kVersion > 0) {
- CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
- } else {
- CHECK_LT(upperSizeBits, std::numeric_limits<SkipValueType>::max());
- }
- // 8 * upperSize is used here instead of upperSizeBits, as that is
- // more serialization-friendly way (upperSizeBits isn't known outside of
- // this function, unlike upperSize; thus numSkipPointers could easily be
- // deduced from upperSize).
- numSkipPointers = (8 * upperSize - size) / q;
- skipPointers = static_cast<SkipValueType*>(
- numSkipPointers == 0
- ? nullptr
- : calloc(numSkipPointers, sizeof(SkipValueType)));
-
- for (size_t i = 0, pos = 0; i < size; ++i) {
- const ValueType upperBits = list[i] >> numLowerBits;
- for (; (pos + 1) * q <= upperBits; ++pos) {
- /* static */ if (kVersion > 0) {
- // Since version 1, just the number of preceding 1-bits is stored.
- skipPointers[pos] = i;
- } else {
- skipPointers[pos] = i + (pos + 1) * q;
- }
- }
- }
+ // 8 * upper is used here instead of upperSizeBits, as that is a
+ // more serialization-friendly way (upperSizeBits doesn't need
+ // to be known by this function, unlike upper).
+
+ // '?: 1' is a workaround for false 'division by zero'
+ // compile-time error.
+ size_t numSkipPointers = (8 * upper - size) / (skipQuantum ?: 1);
+ layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
}
// *** Forward pointers.
// Store (1-indexed) position of every forwardQuantum-th
// 1-bit in upper bits sequence.
- SkipValueType* forwardPointers = nullptr;
- size_t numForwardPointers = 0;
/* static */ if (forwardQuantum != 0) {
- // Workaround to avoid 'division by zero' compile-time error.
- constexpr size_t q = forwardQuantum ?: 1;
- /* static */ if (kVersion > 0) {
- CHECK_LT(upperBound >> numLowerBits,
- std::numeric_limits<SkipValueType>::max());
- } else {
- CHECK_LT(upperSizeBits, std::numeric_limits<SkipValueType>::max());
- }
-
- numForwardPointers = size / q;
- forwardPointers = static_cast<SkipValueType*>(
- numForwardPointers == 0
- ? nullptr
- : malloc(numForwardPointers * sizeof(SkipValueType)));
-
- for (size_t i = q - 1, pos = 0; i < size; i += q, ++pos) {
- const ValueType upperBits = list[i] >> numLowerBits;
- /* static */ if (kVersion > 0) {
- // Since version 1, just the number of preceding 0-bits is stored.
- forwardPointers[pos] = upperBits;
- } else {
- forwardPointers[pos] = upperBits + i + 1;
- }
- }
+ size_t numForwardPointers = size / (forwardQuantum ?: 1);
+ layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
}
- // *** Result.
+ return layout;
+ }
+
+ size_t bytes() const {
+ return lower + upper + skipPointers + forwardPointers;
+ }
+
+ template <typename Range>
+ EliasFanoCompressedListBase<typename Range::iterator>
+ openList(Range& buf) const {
+ EliasFanoCompressedListBase<typename Range::iterator> result;
result.size = size;
result.numLowerBits = numLowerBits;
- result.lower.reset(lower, lowerSize);
- result.upper.reset(upper, upperSize);
- result.skipPointers.reset(
- reinterpret_cast<unsigned char*>(skipPointers),
- numSkipPointers * sizeof(SkipValueType));
- result.forwardPointers.reset(
- reinterpret_cast<unsigned char*>(forwardPointers),
- numForwardPointers * sizeof(SkipValueType));
+ result.data = buf.subpiece(0, bytes());
+
+ auto advance = [&] (size_t n) {
+ auto begin = buf.data();
+ buf.advance(n);
+ return begin;
+ };
+
+ result.skipPointers = advance(skipPointers);
+ result.forwardPointers = advance(forwardPointers);
+ result.lower = advance(lower);
+ result.upper = advance(upper);
+
+ return result;
}
- private:
- // Writes value (with len up to 56 bits) to data starting at pos-th bit.
- static void writeBits56(unsigned char* data, size_t pos,
- uint8_t len, uint64_t value) {
- DCHECK_LE(uint32_t(len), 56);
- DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
- unsigned char* const ptr = data + (pos / 8);
- uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
- ptrv |= value << (pos % 8);
- folly::storeUnaligned<uint64_t>(ptr, ptrv);
+ MutableEliasFanoCompressedList allocList() const {
+ uint8_t* buf = nullptr;
+ // WARNING: Current read/write logic assumes that the 7 bytes
+ // following the last byte of lower and upper sequences are
+ // readable (stored value doesn't matter and won't be changed),
+ // so we allocate additional 7B, but do not include them in size
+ // of returned value.
+ if (size > 0) {
+ buf = static_cast<uint8_t*>(calloc(bytes() + 7, 1));
+ }
+ folly::MutableByteRange bufRange(buf, bytes());
+ return openList(bufRange);
}
+
+ size_t size = 0;
+ uint8_t numLowerBits = 0;
+
+ // Sizes in bytes.
+ size_t lower = 0;
+ size_t upper = 0;
+ size_t skipPointers = 0;
+ size_t forwardPointers = 0;
};
// NOTE: It's recommended to compile EF coding with -msse4.2, starting
namespace instructions {
struct Default {
- static bool supported() {
+ static bool supported(const folly::CpuId& cpuId = {}) {
return true;
}
static inline uint64_t popcount(uint64_t value) {
DCHECK_GT(value, 0);
return __builtin_ctzll(value);
}
+ static inline uint64_t blsr(uint64_t value) {
+ return value & (value - 1);
+ }
};
-struct Fast : public Default {
- static bool supported() {
- folly::CpuId cpuId;
+struct Nehalem : public Default {
+ static bool supported(const folly::CpuId& cpuId = {}) {
return cpuId.popcnt();
}
static inline uint64_t popcount(uint64_t value) {
+ // POPCNT is supported starting with Intel Nehalem, AMD K10.
uint64_t result;
asm ("popcntq %1, %0" : "=r" (result) : "r" (value));
return result;
}
};
+struct Haswell : public Nehalem {
+ static bool supported(const folly::CpuId& cpuId = {}) {
+ return Nehalem::supported(cpuId) && cpuId.bmi1();
+ }
+ static inline uint64_t blsr(uint64_t value) {
+ // BMI1 is supported starting with Intel Haswell, AMD Piledriver.
+ // BLSR combines two instructions into one and reduces register pressure.
+ uint64_t result;
+ asm ("blsrq %1, %0" : "=r" (result) : "r" (value));
+ return result;
+ }
+};
+
} // namespace instructions
namespace detail {
typedef typename Encoder::ValueType ValueType;
explicit UpperBitsReader(const EliasFanoCompressedList& list)
- : forwardPointers_(list.forwardPointers.data()),
- skipPointers_(list.skipPointers.data()),
- start_(list.upper.data()),
- block_(start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0),
- outer_(0), // outer offset: number of consumed bytes in upper.
- inner_(-1), // inner offset: (bit) position in current block.
- position_(-1), // index of current value (= #reads - 1).
- value_(0) { }
+ : forwardPointers_(list.forwardPointers),
+ skipPointers_(list.skipPointers),
+ start_(list.upper) {
+ reset();
+ }
+
+ void reset() {
+ block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
+ outer_ = 0;
+ inner_ = -1;
+ position_ = -1;
+ value_ = 0;
+ }
size_t position() const { return position_; }
ValueType value() const { return value_; }
++position_;
inner_ = Instructions::ctz(block_);
- block_ &= block_ - 1;
+ block_ = Instructions::blsr(block_);
return setValue();
}
folly::loadUnaligned<SkipValueType>(
forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
- /* static */ if (Encoder::version > 0) {
- reposition(dest + steps * q);
- } else {
- reposition(dest);
- }
+ reposition(dest + steps * q);
n = position_ + 1 - steps * q; // n is > 0.
- // correct inner_ will be set at the end.
+ // Correct inner_ will be set at the end.
}
size_t cnt;
block_ = folly::loadUnaligned<block_t>(start_ + outer_);
}
- // NOTE: Trying to skip half-block here didn't show any
- // performance improvements.
-
+ // Skip to the n-th one in the block.
DCHECK_GT(n, 0);
-
- // Kill n - 1 least significant 1-bits.
- for (size_t i = 0; i < n - 1; ++i) {
- block_ &= block_ - 1;
- }
-
- inner_ = Instructions::ctz(block_);
- block_ &= block_ - 1;
+ inner_ = select64<Instructions>(block_, n - 1);
+ block_ &= (block_t(-1) << inner_) << 1;
return setValue();
}
folly::loadUnaligned<SkipValueType>(
skipPointers_ + (steps - 1) * sizeof(SkipValueType));
- /* static */ if (Encoder::version > 0) {
- reposition(dest + q * steps);
- position_ = dest - 1;
- } else {
- reposition(dest);
- position_ = dest - q * steps - 1;
- }
+ reposition(dest + q * steps);
+ position_ = dest - 1;
+
// Correct inner_ and value_ will be set during the next()
// call at the end.
block_ = folly::loadUnaligned<block_t>(start_ + outer_);
}
- // Try to skip half-block.
- constexpr size_t kBitsPerHalfBlock = 4 * sizeof(block_t);
- constexpr block_t halfBlockMask = (block_t(1) << kBitsPerHalfBlock) - 1;
- if ((cnt = Instructions::popcount(~block_ & halfBlockMask)) < skip) {
- position_ += kBitsPerHalfBlock - cnt;
- block_ &= ~halfBlockMask;
+ if (LIKELY(skip)) {
+ auto inner = select64<Instructions>(~block_, skip - 1);
+ position_ += inner - skip + 1;
+ block_ &= block_t(-1) << inner;
}
- // Just skip until we see expected value.
- while (next() < v) { }
+ next();
return value_;
}
+ ValueType jump(size_t n) {
+ if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
+ reset();
+ } else {
+ position_ = -1; // Avoid reading the head, skip() will reposition.
+ }
+ return skip(n);
+ }
+
+ ValueType jumpToNext(ValueType v) {
+ if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
+ reset();
+ } else {
+ value_ = 0; // Avoid reading the head, skipToNext() will reposition.
+ }
+ return skipToNext(v);
+ }
+
private:
ValueType setValue() {
value_ = static_cast<ValueType>(8 * outer_ + inner_ - position_);
const unsigned char* const skipPointers_;
const unsigned char* const start_;
block_t block_;
- size_t outer_;
- size_t inner_;
- size_t position_;
+ size_t outer_; // Outer offset: number of consumed bytes in upper.
+ size_t inner_; // Inner offset: (bit) position in current block.
+ size_t position_; // Index of current value (= #reads - 1).
ValueType value_;
};
class Instructions = instructions::Default>
class EliasFanoReader : private boost::noncopyable {
public:
+ typedef Encoder EncoderType;
typedef typename Encoder::ValueType ValueType;
explicit EliasFanoReader(const EliasFanoCompressedList& list)
: list_(list),
lowerMask_((ValueType(1) << list_.numLowerBits) - 1),
- upper_(list),
- progress_(0),
- value_(0) {
+ upper_(list_) {
DCHECK(Instructions::supported());
// To avoid extra branching during skipTo() while reading
// upper sequence we need to know the last element.
lastValue_ = 0;
return;
}
- ValueType lastUpperValue = 8 * list_.upper.size() - list_.size;
- auto it = list_.upper.end() - 1;
+ ValueType lastUpperValue = 8 * list_.upperSize() - list_.size;
+ auto it = list_.upper + list_.upperSize() - 1;
DCHECK_NE(*it, 0);
lastUpperValue -= 8 - folly::findLastSet(*it);
lastValue_ = readLowerPart(list_.size - 1) |
(lastUpperValue << list_.numLowerBits);
}
- size_t size() const { return list_.size; }
-
- size_t position() const { return progress_ - 1; }
- ValueType value() const { return value_; }
+ void reset() {
+ upper_.reset();
+ progress_ = 0;
+ value_ = 0;
+ }
bool next() {
- if (UNLIKELY(progress_ == list_.size)) {
- value_ = std::numeric_limits<ValueType>::max();
- return false;
+ if (UNLIKELY(progress_ >= list_.size)) {
+ return setDone();
}
value_ = readLowerPart(progress_) |
(upper_.next() << list_.numLowerBits);
bool skip(size_t n) {
CHECK_GT(n, 0);
- progress_ += n - 1;
- if (LIKELY(progress_ < list_.size)) {
- value_ = readLowerPart(progress_) |
- (upper_.skip(n) << list_.numLowerBits);
- ++progress_;
+ progress_ += n;
+ if (LIKELY(progress_ <= list_.size)) {
+ if (LIKELY(n < kLinearScanThreshold)) {
+ for (size_t i = 0; i < n; ++i) upper_.next();
+ } else {
+ upper_.skip(n);
+ }
+ value_ = readLowerPart(progress_ - 1) |
+ (upper_.value() << list_.numLowerBits);
return true;
}
- progress_ = list_.size;
- value_ = std::numeric_limits<ValueType>::max();
- return false;
+ return setDone();
}
bool skipTo(ValueType value) {
DCHECK_GE(value, value_);
if (value <= value_) {
return true;
+ } else if (value > lastValue_) {
+ return setDone();
}
- if (value > lastValue_) {
- progress_ = list_.size;
- value_ = std::numeric_limits<ValueType>::max();
- return false;
+
+ size_t upperValue = (value >> list_.numLowerBits);
+ size_t upperSkip = upperValue - upper_.value();
+ // The average density of ones in upper bits is 1/2.
+ // LIKELY here seems to make things worse, even for small skips.
+ if (upperSkip < 2 * kLinearScanThreshold) {
+ do {
+ upper_.next();
+ } while (UNLIKELY(upper_.value() < upperValue));
+ } else {
+ upper_.skipToNext(upperValue);
}
- upper_.skipToNext(value >> list_.numLowerBits);
- progress_ = upper_.position();
- value_ = readLowerPart(progress_) |
- (upper_.value() << list_.numLowerBits);
- ++progress_;
- while (value_ < value) {
- value_ = readLowerPart(progress_) |
- (upper_.next() << list_.numLowerBits);
- ++progress_;
+ iterateTo(value);
+ return true;
+ }
+
+ bool jump(size_t n) {
+ if (LIKELY(n - 1 < list_.size)) { // n > 0 && n <= list_.size
+ progress_ = n;
+ value_ = readLowerPart(n - 1) | (upper_.jump(n) << list_.numLowerBits);
+ return true;
+ } else if (n == 0) {
+ reset();
+ return true;
}
+ return setDone();
+ }
+ bool jumpTo(ValueType value) {
+ if (value <= 0) {
+ reset();
+ return true;
+ } else if (value > lastValue_) {
+ return setDone();
+ }
+
+ upper_.jumpToNext(value >> list_.numLowerBits);
+ iterateTo(value);
return true;
}
+ size_t size() const { return list_.size; }
+
+ size_t position() const { return progress_ - 1; }
+ ValueType value() const { return value_; }
+
private:
+ bool setDone() {
+ value_ = std::numeric_limits<ValueType>::max();
+ progress_ = list_.size + 1;
+ return false;
+ }
+
ValueType readLowerPart(size_t i) const {
+ DCHECK_LT(i, list_.size);
const size_t pos = i * list_.numLowerBits;
- const unsigned char* ptr = list_.lower.data() + (pos / 8);
+ const unsigned char* ptr = list_.lower + (pos / 8);
const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
return lowerMask_ & (ptrv >> (pos % 8));
}
+ void iterateTo(ValueType value) {
+ while (true) {
+ value_ = readLowerPart(upper_.position()) |
+ (upper_.value() << list_.numLowerBits);
+ if (LIKELY(value_ >= value)) break;
+ upper_.next();
+ }
+ progress_ = upper_.position() + 1;
+ }
+
+ constexpr static size_t kLinearScanThreshold = 8;
+
const EliasFanoCompressedList list_;
const ValueType lowerMask_;
detail::UpperBitsReader<Encoder, Instructions> upper_;
- size_t progress_;
- ValueType value_;
+ size_t progress_ = 0;
+ ValueType value_ = 0;
ValueType lastValue_;
};