/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @author Philip Pronin (philipp@fb.com)
 *
 * Based on the paper by Sebastiano Vigna,
 * "Quasi-succinct indices" (arxiv:1206.4300).
 */
29 #include <type_traits>
31 #include <folly/Assume.h>
32 #include <folly/Bits.h>
33 #include <folly/Likely.h>
34 #include <folly/Portability.h>
35 #include <folly/Range.h>
36 #include <folly/experimental/CodingDetail.h>
37 #include <folly/experimental/Instructions.h>
38 #include <folly/experimental/Select64.h>
39 #include <glog/logging.h>
42 #error EliasFanoCoding.h requires x86_64
45 namespace folly { namespace compression {
47 static_assert(kIsLittleEndian, "EliasFanoCoding.h requires little endianness");
// Non-owning view over an Elias-Fano-encoded list. Templated on the
// pointer type so one layout serves both the read-only and the mutable
// view (see the typedefs at the end of this block).
// NOTE(review): this listing is garbled — the original file's line
// numbers are fused into each line, and some lines are missing here
// (e.g. a `size` member referenced elsewhere, and the closing `};`).
// Code lines below are kept byte-identical.
49 template <class Pointer>
50 struct EliasFanoCompressedListBase {
51 EliasFanoCompressedListBase() = default;
// Converting constructor (e.g. mutable view -> const view); copies the
// metadata and reinterprets each section pointer.
53 template <class OtherPointer>
54 EliasFanoCompressedListBase(
55 const EliasFanoCompressedListBase<OtherPointer>& other)
57 numLowerBits(other.numLowerBits),
59 skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
60 forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
61 lower(reinterpret_cast<Pointer>(other.lower)),
62 upper(reinterpret_cast<Pointer>(other.upper)) { }
// Releases the malloc'd buffer backing `data`. Only instantiable when
// Pointer is non-const (`::free(T(nullptr))` must compile). The view
// does not own `data`, so callers must invoke this explicitly — see
// the WARNING on encode() in the encoder below.
64 template <class T = Pointer>
65 auto free() -> decltype(::free(T(nullptr))) {
66 return ::free(data.data());
// Number of bytes occupied by the upper-bits sequence; it is laid out
// last, so it runs from `upper` to the end of `data`.
69 size_t upperSize() const {
70 return size_t(data.end() - upper);
74 uint8_t numLowerBits = 0;
76 // WARNING: EliasFanoCompressedList has no ownership of data. The 7
77 // bytes following the last byte should be readable.
78 folly::Range<Pointer> data;
80 Pointer skipPointers = nullptr;
81 Pointer forwardPointers = nullptr;
82 Pointer lower = nullptr;
83 Pointer upper = nullptr;
// Read-only and writable instantiations of the view above.
86 typedef EliasFanoCompressedListBase<const uint8_t*> EliasFanoCompressedList;
87 typedef EliasFanoCompressedListBase<uint8_t*> MutableEliasFanoCompressedList;
// Elias-Fano encoder (V2). kSkipQuantum / kForwardQuantum == 0 disable
// the corresponding pointer tables (see the "0 = disabled" defaults).
// NOTE(review): this listing is garbled/truncated — original line
// numbers are fused into each line and several lines are missing
// (e.g. the early-return body in defaultNumLowerBits, the add() call
// inside encode(), closing braces). Code lines kept byte-identical.
89 template <class Value,
90 class SkipValue = size_t,
91 size_t kSkipQuantum = 0, // 0 = disabled
92 size_t kForwardQuantum = 0> // 0 = disabled
93 struct EliasFanoEncoderV2 {
94 static_assert(std::is_integral<Value>::value &&
95 std::is_unsigned<Value>::value,
96 "Value should be unsigned integral");
98 typedef EliasFanoCompressedList CompressedList;
99 typedef MutableEliasFanoCompressedList MutableCompressedList;
101 typedef Value ValueType;
102 typedef SkipValue SkipValueType;
105 static constexpr size_t skipQuantum = kSkipQuantum;
106 static constexpr size_t forwardQuantum = kForwardQuantum;
// Number of low bits stored verbatim per element: approximately
// floor(log2(upperBound / size)), computed without a division.
108 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
109 if (UNLIKELY(size == 0 || upperBound < size)) {
112 // Result that should be returned is "floor(log(upperBound / size))".
113 // In order to avoid expensive division, we rely on
114 // "floor(a) - floor(b) - 1 <= floor(a - b) <= floor(a) - floor(b)".
115 // Assuming "candidate = floor(log(upperBound)) - floor(log(size))",
116 // then result is either "candidate - 1" or "candidate".
117 auto candidate = folly::findLastSet(upperBound) - folly::findLastSet(size);
118 // NOTE: As size != 0, "candidate" is always < 64.
119 return (size > (upperBound >> candidate)) ? candidate - 1 : candidate;
122 // Requires: input range (begin, end) is sorted (encoding
123 // crashes if it's not).
124 // WARNING: encode() mallocates EliasFanoCompressedList::data. As
125 // EliasFanoCompressedList has no ownership of it, you need to call
126 // free() explicitly.
127 template <class RandomAccessIterator>
128 static MutableCompressedList encode(RandomAccessIterator begin,
129 RandomAccessIterator end) {
131 return MutableCompressedList();
// Sizes the layout from element count and the last (largest) value.
133 EliasFanoEncoderV2 encoder(size_t(end - begin), *(end - 1));
// NOTE(review): the loop body (presumably encoder.add(*begin);) is
// missing from this truncated listing.
134 for (; begin != end; ++begin) {
137 return encoder.finish();
// Encodes into a caller-provided, pre-laid-out buffer; zero-fills it
// so the bit-OR writes below start from a clean slate.
140 explicit EliasFanoEncoderV2(const MutableCompressedList& result)
141 : lower_(result.lower),
142 upper_(result.upper),
143 skipPointers_(reinterpret_cast<SkipValueType*>(
144 result.skipPointers)),
145 forwardPointers_(reinterpret_cast<SkipValueType*>(
146 result.forwardPointers)),
148 std::fill(result.data.begin(), result.data.end(), '\0');
// Convenience: computes the layout and mallocs the buffer.
151 EliasFanoEncoderV2(size_t size, ValueType upperBound)
152 : EliasFanoEncoderV2(
153 Layout::fromUpperBoundAndSize(upperBound, size).allocList()) { }
// Appends one value; values must be added in non-decreasing order
// (enforced by CHECK_GE against the previously added value).
155 void add(ValueType value) {
156 CHECK_LT(value, std::numeric_limits<ValueType>::max());
157 CHECK_GE(value, lastValue_);
159 const auto numLowerBits = result_.numLowerBits;
160 const ValueType upperBits = value >> numLowerBits;
162 // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
163 const size_t pos = upperBits + size_;
164 upper_[pos / 8] |= 1U << (pos % 8);
165 // Append numLowerBits bits to lower sequence.
166 if (numLowerBits != 0) {
167 const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
168 writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
// Emit a skip pointer for every skipQuantum-th 0-bit crossed.
171 /* static */ if (skipQuantum != 0) {
172 while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
173 // Store the number of preceding 1-bits.
174 skipPointers_[skipPointersSize_++] = SkipValue(size_);
// Emit a forward pointer for every forwardQuantum-th 1-bit.
178 /* static */ if (forwardQuantum != 0) {
179 if ((size_ + 1) % forwardQuantum == 0) {
180 const auto k = size_ / forwardQuantum;
181 // Store the number of preceding 0-bits.
182 forwardPointers_[k] = upperBits;
// Verifies all `size` elements were added and returns the result view.
190 const MutableCompressedList& finish() const {
191 CHECK_EQ(size_, result_.size);
196 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
197 static void writeBits56(unsigned char* data, size_t pos,
198 uint8_t len, uint64_t value) {
199 DCHECK_LE(uint32_t(len), 56);
200 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
201 unsigned char* const ptr = data + (pos / 8);
202 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
// OR into the (zero-filled) target; relies on 7 readable bytes past
// the end of the buffer (see allocList()'s WARNING).
203 ptrv |= value << (pos % 8);
204 folly::storeUnaligned<uint64_t>(ptr, ptrv);
207 unsigned char* lower_ = nullptr;
208 unsigned char* upper_ = nullptr;
209 SkipValueType* skipPointers_ = nullptr;
210 SkipValueType* forwardPointers_ = nullptr;
// Last value passed to add(); used for the sortedness CHECK.
212 ValueType lastValue_ = 0;
214 size_t skipPointersSize_ = 0;
216 MutableCompressedList result_;
// Byte layout of an encoded list: the size of each section, plus
// openList() to carve a buffer into section pointers and allocList()
// to malloc a buffer and open it.
// NOTE(review): listing is truncated (template parameters, the
// `return layout;`, advance()'s body tail and closing braces are
// missing). Code lines kept byte-identical.
219 template <class Value,
222 size_t kForwardQuantum>
223 struct EliasFanoEncoderV2<Value,
226 kForwardQuantum>::Layout {
227 static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
228 // numLowerBits can be at most 56 because of detail::writeBits56.
229 const uint8_t numLowerBits = std::min(defaultNumLowerBits(upperBound,
233 // Upper bits are stored using unary delta encoding.
234 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
235 const size_t upperSizeBits =
236 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
238 const size_t upper = (upperSizeBits + 7) / 8;
240 // *** Validity checks.
241 // Shift by numLowerBits must be valid.
242 CHECK_LT(numLowerBits, 8 * sizeof(Value));
243 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
244 CHECK_LT(upperBound >> numLowerBits,
245 std::numeric_limits<SkipValueType>::max());
247 return fromInternalSizes(numLowerBits, upper, size);
250 static Layout fromInternalSizes(uint8_t numLowerBits,
255 layout.numLowerBits = numLowerBits;
// Lower bits are bit-packed: numLowerBits per element, rounded up.
257 layout.lower = (numLowerBits * size + 7) / 8;
258 layout.upper = upper;
260 // *** Skip pointers.
261 // Store (1-indexed) position of every skipQuantum-th
262 // 0-bit in upper bits sequence.
263 /* static */ if (skipQuantum != 0) {
264 // 8 * upper is used here instead of upperSizeBits, as that is
265 // more serialization-friendly way (upperSizeBits doesn't need
266 // to be known by this function, unlike upper).
268 size_t numSkipPointers = (8 * upper - size) / skipQuantum;
269 layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
272 // *** Forward pointers.
273 // Store (1-indexed) position of every forwardQuantum-th
274 // 1-bit in upper bits sequence.
275 /* static */ if (forwardQuantum != 0) {
276 size_t numForwardPointers = size / forwardQuantum;
277 layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
// Total payload size in bytes (excludes the 7 slop bytes below).
283 size_t bytes() const {
284 return lower + upper + skipPointers + forwardPointers;
// Carves buf into a compressed-list view, in serialized order:
// skip pointers, forward pointers, lower bits, upper bits.
287 template <class Range>
288 EliasFanoCompressedListBase<typename Range::iterator>
289 openList(Range& buf) const {
290 EliasFanoCompressedListBase<typename Range::iterator> result;
292 result.numLowerBits = numLowerBits;
293 result.data = buf.subpiece(0, bytes());
295 auto advance = [&] (size_t n) {
296 auto begin = buf.data();
301 result.skipPointers = advance(skipPointers);
302 result.forwardPointers = advance(forwardPointers);
303 result.lower = advance(lower);
304 result.upper = advance(upper);
// Mallocs bytes() + 7 (see WARNING) and opens a mutable list over it;
// the caller eventually releases it via CompressedList::free().
309 MutableCompressedList allocList() const {
310 uint8_t* buf = nullptr;
311 // WARNING: Current read/write logic assumes that the 7 bytes
312 // following the last byte of lower and upper sequences are
313 // readable (stored value doesn't matter and won't be changed), so
314 // we allocate additional 7 bytes, but do not include them in size
315 // of returned value.
317 buf = static_cast<uint8_t*>(malloc(bytes() + 7));
319 folly::MutableByteRange bufRange(buf, bytes());
320 return openList(bufRange);
// Section sizes in bytes.
324 uint8_t numLowerBits = 0;
329 size_t skipPointers = 0;
330 size_t forwardPointers = 0;
// Iterator over the unary-coded upper-bits sequence. Tracks position_
// (index of the current 1-bit, i.e. #reads - 1) and value_ (number of
// 0-bits preceding the current 1-bit). Base classes hold the optional
// forward/skip pointer tables.
// NOTE(review): listing is truncated — several method headers (e.g.
// the one before the "Skip to the first non-zero block" body,
// presumably next()) and closing braces are missing. Code lines are
// kept byte-identical.
335 template <class Encoder, class Instructions, class SizeType>
336 class UpperBitsReader : ForwardPointers<Encoder::forwardQuantum>,
337 SkipPointers<Encoder::skipQuantum> {
338 typedef typename Encoder::SkipValueType SkipValueType;
340 typedef typename Encoder::ValueType ValueType;
342 explicit UpperBitsReader(const typename Encoder::CompressedList& list)
343 : ForwardPointers<Encoder::forwardQuantum>(list.forwardPointers),
344 SkipPointers<Encoder::skipQuantum>(list.skipPointers),
// position_ == SizeType max serves as the "before first element" state
// (relies on max + 1 wrapping to 0).
350 block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
351 position_ = std::numeric_limits<SizeType>::max();
356 SizeType position() const {
359 ValueType value() const {
364 // Skip to the first non-zero block.
365 while (block_ == 0) {
366 outer_ += sizeof(block_t);
367 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// ctz locates the next 1-bit; blsr clears it for the following call.
371 size_t inner = Instructions::ctz(block_);
372 block_ = Instructions::blsr(block_);
374 return setValue(inner);
// Advance by n 1-bits (i.e. to the n-th next element); uses a forward
// pointer to leap when n exceeds the quantum.
377 ValueType skip(SizeType n) {
380 position_ += n; // n 1-bits will be read.
382 // Use forward pointer.
383 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
384 const size_t steps = position_ / Encoder::forwardQuantum;
385 const size_t dest = folly::loadUnaligned<SkipValueType>(
386 this->forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
388 reposition(dest + steps * Encoder::forwardQuantum);
389 n = position_ + 1 - steps * Encoder::forwardQuantum; // n is > 0.
393 // Find necessary block.
394 while ((cnt = Instructions::popcount(block_)) < n) {
396 outer_ += sizeof(block_t);
397 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
400 // Skip to the n-th one in the block.
402 size_t inner = select64<Instructions>(block_, n - 1);
// Clear the found 1-bit and everything below it.
403 block_ &= (block_t(-1) << inner) << 1;
405 return setValue(inner);
408 // Skip to the first element that is >= v and located *after* the current
409 // one (so even if current value equals v, position will be increased by 1).
410 ValueType skipToNext(ValueType v) {
411 DCHECK_GE(v, value_);
// Use a skip pointer to leap near the v-th 0-bit when far away.
414 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
415 const size_t steps = v / Encoder::skipQuantum;
416 const size_t dest = folly::loadUnaligned<SkipValueType>(
417 this->skipPointers_ + (steps - 1) * sizeof(SkipValueType));
419 reposition(dest + Encoder::skipQuantum * steps);
420 position_ = dest - 1;
422 // Correct value_ will be set during the next() call at the end.
424 // NOTE: Corresponding block of lower bits sequence may be
425 // prefetched here (via __builtin_prefetch), but experiments
426 // didn't show any significant improvements.
// Scan over 0-bits (~block_) until `skip` of them are consumed.
431 size_t skip = v - (8 * outer_ - position_ - 1);
433 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
434 while ((cnt = Instructions::popcount(~block_)) < skip) {
436 position_ += kBitsPerBlock - cnt;
437 outer_ += sizeof(block_t);
438 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
442 auto inner = select64<Instructions>(~block_, skip - 1);
443 position_ += inner - skip + 1;
444 block_ &= block_t(-1) << inner;
// Jump to the n-th 1-bit from the beginning (absolute, not relative);
// for small n falls back to reset + skip.
451 ValueType jump(size_t n) {
452 if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
455 // Avoid reading the head, skip() will reposition.
456 position_ = std::numeric_limits<SizeType>::max();
// Jump to the first element >= v, from the beginning.
461 ValueType jumpToNext(ValueType v) {
462 if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
465 value_ = 0; // Avoid reading the head, skipToNext() will reposition.
467 return skipToNext(v);
// Value of the element before the current one; scans backwards for
// the previous 1-bit without mutating the reader (const).
470 ValueType previousValue() const {
471 DCHECK_NE(position(), std::numeric_limits<SizeType>::max());
472 DCHECK_GT(position(), 0);
// NOTE(review): local `outer` appears to be declared on a line missing
// from this listing (presumably a copy of outer_).
475 auto inner = size_t(value_) - 8 * outer_ + position_;
476 block_t block = folly::loadUnaligned<block_t>(start_ + outer);
// Mask off the current 1-bit and everything above it.
477 block &= (block_t(1) << inner) - 1;
479 while (UNLIKELY(block == 0)) {
481 outer -= std::min<OuterType>(sizeof(block_t), outer);
482 block = folly::loadUnaligned<block_t>(start_ + outer);
485 inner = 8 * sizeof(block_t) - 1 - Instructions::clz(block);
486 return static_cast<ValueType>(8 * outer + inner - (position_ - 1));
// Marks the reader as exhausted at endPos.
489 void setDone(SizeType endPos) {
// value_ = #0-bits before current 1-bit = absolute bit index minus
// #1-bits read so far (position_).
494 ValueType setValue(size_t inner) {
495 value_ = static_cast<ValueType>(8 * outer_ + inner - position_);
// Repositions to absolute bit `dest`, masking off already-consumed
// bits within the containing block.
499 void reposition(SizeType dest) {
501 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
502 block_ &= ~((block_t(1) << (dest % 8)) - 1);
505 using block_t = uint64_t;
506 // The size in bytes of the upper bits is limited by n + universe / 8,
507 // so a type that can hold either sizes or values is sufficient.
508 using OuterType = typename std::common_type<ValueType, SizeType>::type;
510 const unsigned char* const start_;
512 SizeType position_; // Index of current value (= #reads - 1).
513 OuterType outer_; // Outer offset: number of consumed bytes in upper.
517 } // namespace detail
519 // If kUnchecked = true the caller must guarantee that all the
520 // operations return valid elements, i.e., they would never return
// NOTE(review): listing is truncated — the template header's first
// lines, several method names (e.g. reset()/next()/the setDone-like
// helper) and closing braces are missing. Code kept byte-identical.
524 class Instructions = instructions::Default,
525 bool kUnchecked = false,
526 class SizeType = size_t>
527 class EliasFanoReader {
529 typedef Encoder EncoderType;
530 typedef typename Encoder::ValueType ValueType;
532 explicit EliasFanoReader(const typename Encoder::CompressedList& list)
536 numLowerBits_(list.numLowerBits) {
537 DCHECK(Instructions::supported());
538 // To avoid extra branching during skipTo() while reading
539 // upper sequence we need to know the last element.
540 // If kUnchecked == true, we do not check that skipTo() is called
541 // within the bounds, so we can avoid initializing lastValue_.
542 if (kUnchecked || UNLIKELY(list.size == 0)) {
// Last value = low bits of the last element OR'd with the count of
// 0-bits preceding the last 1-bit, shifted into the high part.
546 ValueType lastUpperValue = ValueType(8 * list.upperSize() - size_);
547 auto it = list.upper + list.upperSize() - 1;
549 lastUpperValue -= 8 - folly::findLastSet(*it);
550 lastValue_ = readLowerPart(size_ - 1) | (lastUpperValue << numLowerBits_);
// Returns the reader to the "before first element" state.
555 value_ = kInvalidValue;
// Advances to the next element; the bounds check is skipped when
// kUnchecked.
559 if (!kUnchecked && UNLIKELY(position() + 1 >= size_)) {
563 value_ = readLowerPart(upper_.position()) |
564 (upper_.value() << numLowerBits_);
// Advances by n elements: linear scan for small n, otherwise one
// upper_.skip() leap.
568 bool skip(SizeType n) {
571 if (kUnchecked || LIKELY(position() + n < size_)) {
572 if (LIKELY(n < kLinearScanThreshold)) {
573 for (SizeType i = 0; i < n; ++i)
578 value_ = readLowerPart(upper_.position()) |
579 (upper_.value() << numLowerBits_);
// Skips forward to the first element >= value.
586 bool skipTo(ValueType value) {
587 // Also works when value_ == kInvalidValue.
588 if (value != kInvalidValue) { DCHECK_GE(value + 1, value_ + 1); }
590 if (!kUnchecked && value > lastValue_) {
592 } else if (value == value_) {
// Compare in the upper-bits domain to decide scan vs. pointer jump.
596 ValueType upperValue = (value >> numLowerBits_);
597 ValueType upperSkip = upperValue - upper_.value();
598 // The average density of ones in upper bits is 1/2.
599 // LIKELY here seems to make things worse, even for small skips.
600 if (upperSkip < 2 * kLinearScanThreshold) {
603 } while (UNLIKELY(upper_.value() < upperValue));
605 upper_.skipToNext(upperValue);
// Random access to the n-th element (0-based).
612 bool jump(SizeType n) {
613 if (LIKELY(n < size_)) { // Also checks that n != -1.
614 value_ = readLowerPart(n) | (upper_.jump(n + 1) << numLowerBits_);
// Jumps (from the beginning) to the first element >= value.
620 bool jumpTo(ValueType value) {
621 if (!kUnchecked && value > lastValue_) {
625 upper_.jumpToNext(value >> numLowerBits_);
// Value of the element preceding the current one.
630 ValueType previousValue() const {
631 DCHECK_GT(position(), 0);
632 DCHECK_LT(position(), size());
633 return readLowerPart(upper_.position() - 1) |
634 (upper_.previousValue() << numLowerBits_);
637 SizeType size() const {
642 return position() < size(); // Also checks that position() != -1.
645 SizeType position() const {
646 return upper_.position();
648 ValueType value() const {
654 // Must hold kInvalidValue + 1 == 0.
655 constexpr static ValueType kInvalidValue =
656 std::numeric_limits<ValueType>::max();
// Marks the reader exhausted (method header not visible in listing).
659 value_ = kInvalidValue;
660 upper_.setDone(size_);
// Extracts the i-th element's numLowerBits_ low bits from the packed
// lower-bits array via an unaligned 64-bit load + shift + bzhi mask.
664 ValueType readLowerPart(SizeType i) const {
666 const size_t pos = i * numLowerBits_;
667 const unsigned char* ptr = lower_ + (pos / 8);
668 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
669 // This removes the branch in the fallback implementation of
670 // bzhi. The condition is verified at encoding time.
671 assume(numLowerBits_ < sizeof(ValueType) * 8);
672 return Instructions::bzhi(ptrv >> (pos % 8), numLowerBits_);
// Linear scan until value_ >= value (used by skipTo for small skips).
675 void iterateTo(ValueType value) {
677 value_ = readLowerPart(upper_.position()) |
678 (upper_.value() << numLowerBits_);
679 if (LIKELY(value_ >= value)) break;
684 constexpr static size_t kLinearScanThreshold = 8;
686 detail::UpperBitsReader<Encoder, Instructions, SizeType> upper_;
687 const uint8_t* lower_;
689 ValueType value_ = kInvalidValue;
690 ValueType lastValue_;
691 uint8_t numLowerBits_;