2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * @author Philip Pronin (philipp@fb.com)
20 * Based on the paper by Sebastiano Vigna,
21 * "Quasi-succinct indices" (arxiv:1206.4300).
29 #include <type_traits>
31 #include <folly/Assume.h>
32 #include <folly/Bits.h>
33 #include <folly/Likely.h>
34 #include <folly/Portability.h>
35 #include <folly/Range.h>
36 #include <folly/experimental/Instructions.h>
37 #include <folly/experimental/Select64.h>
38 #include <glog/logging.h>
41 #error EliasFanoCoding.h requires x86_64
44 namespace folly { namespace compression {
// The coder performs unaligned little-endian 64-bit loads/stores.
46 static_assert(kIsLittleEndian, "EliasFanoCoding.h requires little endianness");
// Non-owning view of an Elias-Fano-encoded list. Pointer is either
// const uint8_t* (read-only view) or uint8_t* (mutable view).
48 template <class Pointer>
49 struct EliasFanoCompressedListBase {
50 EliasFanoCompressedListBase() = default;
// Converting copy: allows MutableEliasFanoCompressedList to decay to
// EliasFanoCompressedList (uint8_t* -> const uint8_t*).
52 template <class OtherPointer>
53 EliasFanoCompressedListBase(
54 const EliasFanoCompressedListBase<OtherPointer>& other)
56 numLowerBits(other.numLowerBits),
58 skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
59 forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
60 lower(reinterpret_cast<Pointer>(other.lower)),
61 upper(reinterpret_cast<Pointer>(other.upper)) { }
// Releases the malloc()-ed buffer backing `data`. The trailing-return
// SFINAE (::free(T(nullptr))) makes this member ill-formed for const
// Pointer, so only the mutable variant can free.
63 template <class T = Pointer>
64 auto free() -> decltype(::free(T(nullptr))) {
65 return ::free(data.data());
// Bytes occupied by the upper-bits section (it is laid out last,
// so it runs from `upper` to the end of `data`).
68 size_t upperSize() const {
69 return size_t(data.end() - upper);
// Number of low bits stored verbatim per element.
73 uint8_t numLowerBits = 0;
75 // WARNING: EliasFanoCompressedList has no ownership of data. The 7
76 // bytes following the last byte should be readable.
77 folly::Range<Pointer> data;
// Section pointers inside `data`, in layout order:
// skip pointers, forward pointers, lower bits, upper bits.
79 Pointer skipPointers = nullptr;
80 Pointer forwardPointers = nullptr;
81 Pointer lower = nullptr;
82 Pointer upper = nullptr;
85 typedef EliasFanoCompressedListBase<const uint8_t*> EliasFanoCompressedList;
86 typedef EliasFanoCompressedListBase<uint8_t*> MutableEliasFanoCompressedList;
// Elias-Fano encoder. Each value is split into a low part (numLowerBits
// bits, stored verbatim in the "lower" section) and a high part (stored
// unary-delta-encoded in the "upper" bit sequence). Non-zero quanta
// enable skip/forward pointer tables that accelerate reader skips.
88 template <class Value,
89 class SkipValue = size_t,
90 size_t kSkipQuantum = 0, // 0 = disabled
91 size_t kForwardQuantum = 0> // 0 = disabled
92 struct EliasFanoEncoderV2 {
93 static_assert(std::is_integral<Value>::value &&
94 std::is_unsigned<Value>::value,
95 "Value should be unsigned integral");
97 typedef EliasFanoCompressedList CompressedList;
98 typedef MutableEliasFanoCompressedList MutableCompressedList;
100 typedef Value ValueType;
101 typedef SkipValue SkipValueType;
104 static constexpr size_t skipQuantum = kSkipQuantum;
105 static constexpr size_t forwardQuantum = kForwardQuantum;
// Returns the number of low bits to store explicitly per element,
// approximately floor(log2(upperBound / size)), which minimizes the
// total size of the encoding.
107 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
108 if (UNLIKELY(size == 0 || upperBound < size)) {
111 // Result that should be returned is "floor(log(upperBound / size))".
112 // In order to avoid expensive division, we rely on
113 // "floor(a) - floor(b) - 1 <= floor(a - b) <= floor(a) - floor(b)".
114 // Assuming "candidate = floor(log(upperBound)) - floor(log(size))",
115 // then result is either "candidate - 1" or "candidate".
116 auto candidate = folly::findLastSet(upperBound) - folly::findLastSet(size);
117 // NOTE: As size != 0, "candidate" is always < 64.
// If size exceeds upperBound >> candidate, the subtraction of floors
// overshot by one; correct downwards.
118 return (size > (upperBound >> candidate)) ? candidate - 1 : candidate;
121 // Requires: input range (begin, end) is sorted (encoding
122 // crashes if it's not).
123 // WARNING: encode() mallocates EliasFanoCompressedList::data. As
124 // EliasFanoCompressedList has no ownership of it, you need to call
125 // free() explicitly.
126 template <class RandomAccessIterator>
127 static MutableCompressedList encode(RandomAccessIterator begin,
128 RandomAccessIterator end) {
// Empty input encodes to an empty (default-constructed) list.
130 return MutableCompressedList();
// The last element is the upper bound; together with the size it
// fully determines the layout.
132 EliasFanoEncoderV2 encoder(size_t(end - begin), *(end - 1));
133 for (; begin != end; ++begin) {
136 return encoder.finish();
// Writes into a caller-provided, pre-laid-out buffer. Zeroes the
// buffer first, because add() only ORs bits in.
139 explicit EliasFanoEncoderV2(const MutableCompressedList& result)
140 : lower_(result.lower),
141 upper_(result.upper),
142 skipPointers_(reinterpret_cast<SkipValueType*>(
143 result.skipPointers)),
144 forwardPointers_(reinterpret_cast<SkipValueType*>(
145 result.forwardPointers)),
147 std::fill(result.data.begin(), result.data.end(), 0);
// Convenience constructor: computes the layout and malloc()s a backing
// buffer via Layout::allocList() (caller must free() the result).
150 EliasFanoEncoderV2(size_t size, ValueType upperBound)
151 : EliasFanoEncoderV2(
152 Layout::fromUpperBoundAndSize(upperBound, size).allocList()) { }
// Appends the next value. Values must be added in non-decreasing order
// (checked against lastValue_).
154 void add(ValueType value) {
155 CHECK_LT(value, std::numeric_limits<ValueType>::max());
156 CHECK_GE(value, lastValue_);
158 const auto numLowerBits = result_.numLowerBits;
159 const ValueType upperBits = value >> numLowerBits;
161 // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
162 const size_t pos = upperBits + size_;
163 upper_[pos / 8] |= 1U << (pos % 8);
164 // Append numLowerBits bits to lower sequence.
165 if (numLowerBits != 0) {
166 const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
167 writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
// Emit a skip pointer for every skipQuantum-th 0-bit of the upper
// sequence. "/* static */" marks a compile-time-constant condition.
170 /* static */ if (skipQuantum != 0) {
171 while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
172 // Store the number of preceding 1-bits.
173 skipPointers_[skipPointersSize_++] = SkipValue(size_);
// Emit a forward pointer for every forwardQuantum-th 1-bit.
177 /* static */ if (forwardQuantum != 0) {
178 if ((size_ + 1) % forwardQuantum == 0) {
179 const auto k = size_ / forwardQuantum;
180 // Store the number of preceding 0-bits.
181 forwardPointers_[k] = upperBits;
// Returns the completed list; the number of added elements must match
// the size the layout was computed for.
189 const MutableCompressedList& finish() const {
190 CHECK_EQ(size_, result_.size);
195 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
196 static void writeBits56(unsigned char* data, size_t pos,
197 uint8_t len, uint64_t value) {
198 DCHECK_LE(uint32_t(len), 56);
199 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
200 unsigned char* const ptr = data + (pos / 8);
// Read-modify-write of an unaligned 64-bit word; safe because 7 extra
// bytes past the payload are allocated (see Layout::allocList()).
201 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
202 ptrv |= value << (pos % 8);
203 folly::storeUnaligned<uint64_t>(ptr, ptrv);
// Write cursors into the sections of the output buffer.
206 unsigned char* lower_ = nullptr;
207 unsigned char* upper_ = nullptr;
208 SkipValueType* skipPointers_ = nullptr;
209 SkipValueType* forwardPointers_ = nullptr;
// Last value passed to add(), for the monotonicity CHECK.
211 ValueType lastValue_ = 0;
// Number of skip pointers emitted so far.
213 size_t skipPointersSize_ = 0;
215 MutableCompressedList result_;
// Layout: computes the byte sizes and relative offsets of the four
// sections (skip pointers, forward pointers, lower bits, upper bits)
// for a list of `size` elements bounded by `upperBound`.
218 template <class Value,
221 size_t kForwardQuantum>
222 struct EliasFanoEncoderV2<Value,
225 kForwardQuantum>::Layout {
226 static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
227 // numLowerBits can be at most 56 because of detail::writeBits56.
228 const uint8_t numLowerBits = std::min(defaultNumLowerBits(upperBound,
232 // Upper bits are stored using unary delta encoding.
233 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
234 const size_t upperSizeBits =
235 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
237 const size_t upper = (upperSizeBits + 7) / 8;
239 // *** Validity checks.
240 // Shift by numLowerBits must be valid.
241 CHECK_LT(numLowerBits, 8 * sizeof(Value));
242 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
243 CHECK_LT(upperBound >> numLowerBits,
244 std::numeric_limits<SkipValueType>::max());
246 return fromInternalSizes(numLowerBits, upper, size);
// Builds a Layout from already-computed section sizes.
249 static Layout fromInternalSizes(uint8_t numLowerBits,
254 layout.numLowerBits = numLowerBits;
// Lower section: numLowerBits bits per element, rounded up to bytes.
256 layout.lower = (numLowerBits * size + 7) / 8;
257 layout.upper = upper;
259 // *** Skip pointers.
260 // Store (1-indexed) position of every skipQuantum-th
261 // 0-bit in upper bits sequence.
262 /* static */ if (skipQuantum != 0) {
263 // 8 * upper is used here instead of upperSizeBits, as that is
264 // more serialization-friendly way (upperSizeBits doesn't need
265 // to be known by this function, unlike upper).
267 size_t numSkipPointers = (8 * upper - size) / skipQuantum;
268 layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
271 // *** Forward pointers.
272 // Store (1-indexed) position of every forwardQuantum-th
273 // 1-bit in upper bits sequence.
274 /* static */ if (forwardQuantum != 0) {
275 size_t numForwardPointers = size / forwardQuantum;
276 layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
// Total payload size in bytes (excluding the 7-byte read padding
// added by allocList()).
282 size_t bytes() const {
283 return lower + upper + skipPointers + forwardPointers;
// Carves `buf` into a compressed-list view according to this layout.
286 template <class Range>
287 EliasFanoCompressedListBase<typename Range::iterator>
288 openList(Range& buf) const {
289 EliasFanoCompressedListBase<typename Range::iterator> result;
291 result.numLowerBits = numLowerBits;
292 result.data = buf.subpiece(0, bytes());
294 auto advance = [&] (size_t n) {
295 auto begin = buf.data();
// Section order within the buffer: skip, forward, lower, upper.
300 result.skipPointers = advance(skipPointers);
301 result.forwardPointers = advance(forwardPointers);
302 result.lower = advance(lower);
303 result.upper = advance(upper);
// malloc()s a buffer for this layout and opens it as a mutable list.
308 MutableCompressedList allocList() const {
309 uint8_t* buf = nullptr;
310 // WARNING: Current read/write logic assumes that the 7 bytes
311 // following the last byte of lower and upper sequences are
312 // readable (stored value doesn't matter and won't be changed), so
313 // we allocate additional 7 bytes, but do not include them in size
314 // of returned value.
316 buf = static_cast<uint8_t*>(malloc(bytes() + 7));
318 folly::MutableByteRange bufRange(buf, bytes());
319 return openList(bufRange);
// Section sizes in bytes.
323 uint8_t numLowerBits = 0;
328 size_t skipPointers = 0;
329 size_t forwardPointers = 0;
// Reader for the unary-encoded upper-bits sequence. Tracks the current
// 1-bit: position_ is its index among 1-bits, value_ the number of
// preceding 0-bits (i.e. the decoded upper part of the value).
334 template <class Encoder, class Instructions, class SizeType>
335 class UpperBitsReader {
336 typedef typename Encoder::SkipValueType SkipValueType;
338 typedef typename Encoder::ValueType ValueType;
340 explicit UpperBitsReader(const typename Encoder::CompressedList& list)
341 : forwardPointers_(list.forwardPointers),
342 skipPointers_(list.skipPointers),
// Load the first 64-bit block; position "before the beginning"
// (max(), so +1 wraps to 0 on the first advance).
348 block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
349 position_ = std::numeric_limits<SizeType>::max();
354 SizeType position() const {
357 ValueType value() const {
// Advances to the next 1-bit and returns the new value.
362 // Skip to the first non-zero block.
363 while (block_ == 0) {
364 outer_ += sizeof(block_t);
365 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// ctz locates the lowest set bit; blsr clears it so the next call
// continues past it.
369 size_t inner = Instructions::ctz(block_);
370 block_ = Instructions::blsr(block_);
372 return setValue(inner);
// Advances past n 1-bits and returns the new value.
375 ValueType skip(SizeType n) {
378 position_ += n; // n 1-bits will be read.
380 // Use forward pointer.
381 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
382 const size_t steps = position_ / Encoder::forwardQuantum;
// dest = number of 0-bits preceding the (steps * quantum)-th 1-bit,
// as stored by the encoder.
384 folly::loadUnaligned<SkipValueType>(
385 forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
387 reposition(dest + steps * Encoder::forwardQuantum);
388 n = position_ + 1 - steps * Encoder::forwardQuantum; // n is > 0.
392 // Find necessary block.
393 while ((cnt = Instructions::popcount(block_)) < n) {
395 outer_ += sizeof(block_t);
396 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
399 // Skip to the n-th one in the block.
401 size_t inner = select64<Instructions>(block_, n - 1);
// Clear the selected bit and everything below it.
402 block_ &= (block_t(-1) << inner) << 1;
404 return setValue(inner);
407 // Skip to the first element that is >= v and located *after* the current
408 // one (so even if current value equals v, position will be increased by 1).
409 ValueType skipToNext(ValueType v) {
410 DCHECK_GE(v, value_);
// Use a skip pointer when jumping over at least one quantum of 0-bits.
413 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
414 const size_t steps = v / Encoder::skipQuantum;
// dest = number of 1-bits preceding the (steps * quantum)-th 0-bit,
// as stored by the encoder.
416 folly::loadUnaligned<SkipValueType>(
417 skipPointers_ + (steps - 1) * sizeof(SkipValueType));
419 reposition(dest + Encoder::skipQuantum * steps);
420 position_ = dest - 1;
422 // Correct value_ will be set during the next() call at the end.
424 // NOTE: Corresponding block of lower bits sequence may be
425 // prefetched here (via __builtin_prefetch), but experiments
426 // didn't show any significant improvements.
// Number of 0-bits still to be skipped past.
431 size_t skip = v - (8 * outer_ - position_ - 1);
433 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
// Scan blocks, counting 0-bits via popcount of the complement.
434 while ((cnt = Instructions::popcount(~block_)) < skip) {
436 position_ += kBitsPerBlock - cnt;
437 outer_ += sizeof(block_t);
438 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// Select the skip-th 0-bit; the 1-bits below it have been consumed.
442 auto inner = select64<Instructions>(~block_, skip - 1);
443 position_ += inner - skip + 1;
444 block_ &= block_t(-1) << inner;
// Jump to the n-th 1-bit, counting from the start of the sequence.
451 ValueType jump(size_t n) {
452 if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
455 // Avoid reading the head, skip() will reposition.
456 position_ = std::numeric_limits<SizeType>::max();
// Jump to the first element >= v, counting from the start.
461 ValueType jumpToNext(ValueType v) {
462 if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
465 value_ = 0; // Avoid reading the head, skipToNext() will reposition.
467 return skipToNext(v);
// Returns the value of the previous element by scanning backwards for
// the preceding 1-bit; requires a valid, non-first position.
470 ValueType previousValue() const {
471 DCHECK_NE(position(), std::numeric_limits<SizeType>::max());
472 DCHECK_GT(position(), 0);
// Mask off the current bit and everything above it in the block.
475 auto inner = size_t(value_) - 8 * outer_ + position_;
476 block_t block = folly::loadUnaligned<block_t>(start_ + outer);
477 block &= (block_t(1) << inner) - 1;
// Walk back one block at a time until a set bit is found.
479 while (UNLIKELY(block == 0)) {
481 outer -= std::min<OuterType>(sizeof(block_t), outer);
482 block = folly::loadUnaligned<block_t>(start_ + outer);
// clz locates the highest set bit in the block.
485 inner = 8 * sizeof(block_t) - 1 - Instructions::clz(block);
486 return static_cast<ValueType>(8 * outer + inner - (position_ - 1));
// Marks the reader as exhausted, positioned at endPos.
489 void setDone(SizeType endPos) {
// Recomputes value_ from the bit offset of the current 1-bit:
// value = global bit index - number of preceding 1-bits.
494 ValueType setValue(size_t inner) {
495 value_ = static_cast<ValueType>(8 * outer_ + inner - position_);
// Repositions the cursor at absolute bit offset `dest` in the upper
// sequence, masking out the already-consumed low bits of the block.
499 void reposition(SizeType dest) {
501 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
502 block_ &= ~((block_t(1) << (dest % 8)) - 1);
505 using block_t = uint64_t;
506 // The size in bytes of the upper bits is limited by n + universe / 8,
507 // so a type that can hold either sizes or values is sufficient.
508 using OuterType = typename std::common_type<ValueType, SizeType>::type;
510 const unsigned char* const forwardPointers_;
511 const unsigned char* const skipPointers_;
512 const unsigned char* const start_;
514 SizeType position_; // Index of current value (= #reads - 1).
515 OuterType outer_; // Outer offset: number of consumed bytes in upper.
519 } // namespace detail
521 // If kUnchecked = true the caller must guarantee that all the
522 // operations return valid elements, i.e., they would never return
526 class Instructions = instructions::Default,
527 bool kUnchecked = false,
528 class SizeType = size_t>
529 class EliasFanoReader {
531 typedef Encoder EncoderType;
532 typedef typename Encoder::ValueType ValueType;
534 explicit EliasFanoReader(const typename Encoder::CompressedList& list)
538 numLowerBits_(list.numLowerBits) {
539 DCHECK(Instructions::supported());
540 // To avoid extra branching during skipTo() while reading
541 // upper sequence we need to know the last element.
542 // If kUnchecked == true, we do not check that skipTo() is called
543 // within the bounds, so we can avoid initializing lastValue_.
544 if (kUnchecked || UNLIKELY(list.size == 0)) {
// Derive the last value directly from the tail of the upper-bits
// sequence instead of decoding the whole list.
548 ValueType lastUpperValue = ValueType(8 * list.upperSize() - size_);
549 auto it = list.upper + list.upperSize() - 1;
// Subtract the 0-bits above the last set bit of the final byte.
551 lastUpperValue -= 8 - folly::findLastSet(*it);
552 lastValue_ = readLowerPart(size_ - 1) | (lastUpperValue << numLowerBits_);
// Positions the reader before the first element.
557 value_ = kInvalidValue;
// Advances to the next element; bounds-checked unless kUnchecked.
561 if (!kUnchecked && UNLIKELY(position() + 1 >= size_)) {
// Combine the decoded upper part with the packed lower bits.
565 value_ = readLowerPart(upper_.position()) |
566 (upper_.value() << numLowerBits_);
// Advances by n elements: a short linear scan for small n, otherwise
// the upper-bits skip (which can use forward pointers).
570 bool skip(SizeType n) {
573 if (kUnchecked || LIKELY(position() + n < size_)) {
574 if (LIKELY(n < kLinearScanThreshold)) {
575 for (SizeType i = 0; i < n; ++i)
580 value_ = readLowerPart(upper_.position()) |
581 (upper_.value() << numLowerBits_);
// Skips forward to the first element >= value.
588 bool skipTo(ValueType value) {
589 // Also works when value_ == kInvalidValue.
590 if (value != kInvalidValue) { DCHECK_GE(value + 1, value_ + 1); }
592 if (!kUnchecked && value > lastValue_) {
594 } else if (value == value_) {
// Linear-scan small gaps in the upper bits; otherwise use skipToNext.
598 ValueType upperValue = (value >> numLowerBits_);
599 ValueType upperSkip = upperValue - upper_.value();
600 // The average density of ones in upper bits is 1/2.
601 // LIKELY here seems to make things worse, even for small skips.
602 if (upperSkip < 2 * kLinearScanThreshold) {
605 } while (UNLIKELY(upper_.value() < upperValue));
607 upper_.skipToNext(upperValue);
// Random-access positioning at 0-based index n.
614 bool jump(SizeType n) {
615 if (LIKELY(n < size_)) { // Also checks that n != -1.
616 value_ = readLowerPart(n) | (upper_.jump(n + 1) << numLowerBits_);
// Jumps to the first element >= value, from the start of the list.
622 bool jumpTo(ValueType value) {
623 if (!kUnchecked && value > lastValue_) {
627 upper_.jumpToNext(value >> numLowerBits_);
// Value at position() - 1; valid only strictly inside the list.
632 ValueType previousValue() const {
633 DCHECK_GT(position(), 0);
634 DCHECK_LT(position(), size());
635 return readLowerPart(upper_.position() - 1) |
636 (upper_.previousValue() << numLowerBits_);
639 SizeType size() const {
644 return position() < size(); // Also checks that position() != -1.
647 SizeType position() const {
648 return upper_.position();
650 ValueType value() const {
656 // Must hold kInvalidValue + 1 == 0.
657 constexpr static ValueType kInvalidValue =
658 std::numeric_limits<ValueType>::max();
// Marks the reader as exhausted.
661 value_ = kInvalidValue;
662 upper_.setDone(size_);
// Extracts the i-th element's low numLowerBits_ bits from the packed
// lower-bits array with a single unaligned 64-bit load (relies on the
// 7-byte read padding past the payload).
666 ValueType readLowerPart(SizeType i) const {
668 const size_t pos = i * numLowerBits_;
669 const unsigned char* ptr = lower_ + (pos / 8);
670 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
671 // This removes the branch in the fallback implementation of
672 // bzhi. The condition is verified at encoding time.
673 assume(numLowerBits_ < sizeof(ValueType) * 8);
674 return Instructions::bzhi(ptrv >> (pos % 8), numLowerBits_);
// Linear scan via the upper-bits reader until value_ >= value.
677 void iterateTo(ValueType value) {
679 value_ = readLowerPart(upper_.position()) |
680 (upper_.value() << numLowerBits_);
681 if (LIKELY(value_ >= value)) break;
// Below this count, skip()/skipTo() prefer a linear scan to pointers.
686 constexpr static size_t kLinearScanThreshold = 8;
688 detail::UpperBitsReader<Encoder, Instructions, SizeType> upper_;
689 const uint8_t* lower_;
691 ValueType value_ = kInvalidValue;
692 ValueType lastValue_;
693 uint8_t numLowerBits_;