2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * @author Philip Pronin (philipp@fb.com)
20 * Based on the paper by Sebastiano Vigna,
21 * "Quasi-succinct indices" (arxiv:1206.4300).
24 #ifndef FOLLY_EXPERIMENTAL_ELIAS_FANO_CODING_H
25 #define FOLLY_EXPERIMENTAL_ELIAS_FANO_CODING_H
28 #error EliasFanoCoding.h requires GCC
31 #if !defined(__x86_64__)
32 #error EliasFanoCoding.h requires x86_64
38 #include <type_traits>
39 #include <boost/noncopyable.hpp>
40 #include <glog/logging.h>
42 #include "folly/Bits.h"
43 #include "folly/CpuId.h"
44 #include "folly/Likely.h"
45 #include "folly/Range.h"
47 #if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
48 #error EliasFanoCoding.h requires little endianness
51 namespace folly { namespace compression {
// Plain holder for the four byte ranges produced by EliasFanoEncoder::encode().
// NOTE(review): this chunk is a lossy paste -- each line carries a stray
// original line number, and several lines are missing (embedded numbering
// jumps 55->58, 61->67, 72->74). In particular the "void free() {" method
// header that should enclose the ::free() calls below, the closing brace,
// and the size/numLowerBits member declarations are not visible here.
53 struct EliasFanoCompressedList {
// Default state: empty list, zero lower bits per element.
54 EliasFanoCompressedList()
55 : size(0), numLowerBits(0) { }
// Body of the (missing-header) free() method: releases all four buffers,
// which encode() allocates with malloc/calloc. const_cast is needed because
// ByteRange exposes only const data. Safe on nullptr (::free(nullptr) is a
// no-op), which covers the disabled-pointers cases.
58 ::free(const_cast<unsigned char*>(lower.data()));
59 ::free(const_cast<unsigned char*>(upper.data()));
60 ::free(const_cast<unsigned char*>(skipPointers.data()));
61 ::free(const_cast<unsigned char*>(forwardPointers.data()));
67 // WARNING: EliasFanoCompressedList has no ownership of
68 // lower, upper, skipPointers and forwardPointers.
69 // The 7 bytes following the last byte of lower and upper
70 // sequences should be readable.
// Non-owning views; caller must invoke free() (above) exactly once.
71 folly::ByteRange lower;
72 folly::ByteRange upper;
74 folly::ByteRange skipPointers;
75 folly::ByteRange forwardPointers;
79 // In version 1 skip / forward pointers encoding has been changed,
80 // so SkipValue = uint32_t is able to address up to ~4B elements,
81 // instead of only ~2B.
// Encoder for Elias-Fano monotone sequences (Vigna, arXiv:1206.4300).
// NOTE(review): lossy paste -- embedded line numbers show gaps (e.g. the
// kVersion template parameter line 86, several else-branches, and closing
// braces are missing from this chunk). Comments below describe only what the
// visible lines establish.
82 template <class Value,
83 class SkipValue = size_t,
84 size_t kSkipQuantum = 0, // 0 = disabled
85 size_t kForwardQuantum = 0, // 0 = disabled
87 struct EliasFanoEncoder {
88 static_assert(std::is_integral<Value>::value &&
89 std::is_unsigned<Value>::value,
90 "Value should be unsigned integral");
92 typedef EliasFanoCompressedList CompressedList;
94 typedef Value ValueType;
95 typedef SkipValue SkipValueType;
97 static constexpr size_t skipQuantum = kSkipQuantum;
98 static constexpr size_t forwardQuantum = kForwardQuantum;
99 static constexpr size_t version = kVersion;
// Optimal number of low bits per element: floor(log2(upperBound / size)).
// (The early-return branch body for the degenerate case is missing here.)
101 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
102 if (size == 0 || upperBound < size) {
105 // floor(log(upperBound / size));
106 return folly::findLastSet(upperBound / size) - 1;
109 // WARNING: encode() mallocates lower, upper, skipPointers
110 // and forwardPointers. As EliasFanoCompressedList has
111 // no ownership of them, you need to call free() explicitly.
// Convenience overload: encode a contiguous array via the iterator overload.
112 static void encode(const ValueType* list, size_t size,
113 EliasFanoCompressedList& result) {
114 encode(list, list + size, result);
117 // Range (begin, end) should be sorted.
118 template <class RandomAccessIterator>
119 static void encode(RandomAccessIterator begin,
120 RandomAccessIterator end,
121 EliasFanoCompressedList& result) {
122 CHECK(std::is_sorted(begin, end));
125 const size_t size = end - begin;
// Reset result to the empty state before filling it in.
128 result = EliasFanoCompressedList();
// Sorted input => last element is the upper bound of the sequence.
132 const ValueType upperBound = list[size - 1];
133 uint8_t numLowerBits = defaultNumLowerBits(upperBound, size);
135 // This is detail::writeBits56 limitation.
136 numLowerBits = std::min<uint8_t>(numLowerBits, 56);
137 CHECK_LT(numLowerBits, 8 * sizeof(Value)); // As we shift by numLowerBits.
139 // WARNING: Current read/write logic assumes that the 7 bytes
140 // following the last byte of lower and upper sequences are
141 // readable (stored value doesn't matter and won't be changed),
142 // so we allocate additional 7B, but do not include them in size
143 // of returned value.
// *** Lower bits: numLowerBits low bits of each value, densely bit-packed.
146 const size_t lowerSize = (numLowerBits * size + 7) / 8;
147 unsigned char* lower = nullptr;
148 if (lowerSize > 0) { // numLowerBits != 0
// +7 slack bytes so 8-byte unaligned loads/stores near the end stay in-bounds.
149 lower = static_cast<unsigned char*>(calloc(lowerSize + 7, 1));
150 const ValueType lowerMask = (ValueType(1) << numLowerBits) - 1;
151 for (size_t i = 0; i < size; ++i) {
152 const ValueType lowerBits = list[i] & lowerMask;
153 writeBits56(lower, i * numLowerBits, numLowerBits, lowerBits);
158 // Upper bits are stored using unary delta encoding.
159 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
// (The "+ size" term -- number of 1-bits -- was on the missing line 162.)
160 const size_t upperSizeBits =
161 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
163 const size_t upperSize = (upperSizeBits + 7) / 8;
164 unsigned char* const upper =
165 static_cast<unsigned char*>(calloc(upperSize + 7, 1));
166 for (size_t i = 0; i < size; ++i) {
167 const ValueType upperBits = list[i] >> numLowerBits;
168 const size_t pos = upperBits + i; // upperBits 0-bits and (i + 1) 1-bits.
169 upper[pos / 8] |= 1U << (pos % 8);
172 // *** Skip pointers.
173 // Store (1-indexed) position of every skipQuantum-th
174 // 0-bit in upper bits sequence.
175 SkipValueType* skipPointers = nullptr;
176 size_t numSkipPointers = 0;
// "/* static */ if" marks a condition on a compile-time constant, so the dead
// branch is optimized away (predates C++17 "if constexpr").
177 /* static */ if (skipQuantum != 0) {
178 // Workaround to avoid 'division by zero' compile-time error.
// "?:" is the GNU elvis-operator extension (x ?: y == x ? x : y).
179 constexpr size_t q = skipQuantum ?: 1;
180 /* static */ if (kVersion > 0) {
// v1 stores 1-bit counts (indices), so size must fit in SkipValueType;
// v0 (missing else-branch) checked upperSizeBits instead.
181 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
183 CHECK_LT(upperSizeBits, std::numeric_limits<SkipValueType>::max());
185 // 8 * upperSize is used here instead of upperSizeBits, as that is
186 // more serialization-friendly way (upperSizeBits isn't known outside of
187 // this function, unlike upperSize; thus numSkipPointers could easily be
188 // deduced from upperSize).
189 numSkipPointers = (8 * upperSize - size) / q;
// (The "numSkipPointers == 0 ? nullptr" arm was on missing lines 191-192.)
190 skipPointers = static_cast<SkipValueType*>(
193 : calloc(numSkipPointers, sizeof(SkipValueType)));
// For every q-th 0-bit, record where reading should resume.
195 for (size_t i = 0, pos = 0; i < size; ++i) {
196 const ValueType upperBits = list[i] >> numLowerBits;
197 for (; (pos + 1) * q <= upperBits; ++pos) {
198 /* static */ if (kVersion > 0) {
199 // Since version 1, just the number of preceding 1-bits is stored.
200 skipPointers[pos] = i;
// v0 fallback (its "} else {" line is missing): absolute bit position.
202 skipPointers[pos] = i + (pos + 1) * q;
208 // *** Forward pointers.
209 // Store (1-indexed) position of every forwardQuantum-th
210 // 1-bit in upper bits sequence.
211 SkipValueType* forwardPointers = nullptr;
212 size_t numForwardPointers = 0;
213 /* static */ if (forwardQuantum != 0) {
214 // Workaround to avoid 'division by zero' compile-time error.
215 constexpr size_t q = forwardQuantum ?: 1;
216 CHECK_LT(upperSizeBits, std::numeric_limits<SkipValueType>::max());
218 numForwardPointers = size / q;
219 forwardPointers = static_cast<SkipValueType*>(
220 numForwardPointers == 0
222 : malloc(numForwardPointers * sizeof(SkipValueType)));
// Every q-th 1-bit (i.e. every q-th element) gets a pointer.
224 for (size_t i = q - 1, pos = 0; i < size; i += q, ++pos) {
225 const ValueType upperBits = list[i] >> numLowerBits;
226 /* static */ if (kVersion > 0) {
227 // Since version 1, just the number of preceding 0-bits is stored.
228 forwardPointers[pos] = upperBits;
// v0 fallback (missing "} else {"): absolute 1-indexed bit position.
230 forwardPointers[pos] = upperBits + i + 1;
// *** Result. (result.size assignment was on the missing line 236.)
237 result.numLowerBits = numLowerBits;
238 result.lower.reset(lower, lowerSize);
239 result.upper.reset(upper, upperSize);
240 result.skipPointers.reset(
241 reinterpret_cast<unsigned char*>(skipPointers),
242 numSkipPointers * sizeof(SkipValueType));
243 result.forwardPointers.reset(
244 reinterpret_cast<unsigned char*>(forwardPointers),
245 numForwardPointers * sizeof(SkipValueType));
249 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
// 56-bit cap guarantees value << (pos % 8) fits in the 64-bit unaligned
// load/store window; surrounding bits are preserved by the OR.
250 static void writeBits56(unsigned char* data, size_t pos,
251 uint8_t len, uint64_t value) {
252 DCHECK_LE(uint32_t(len), 56);
// value must have no bits set above len.
253 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
254 unsigned char* const ptr = data + (pos / 8);
255 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
256 ptrv |= value << (pos % 8);
257 folly::storeUnaligned<uint64_t>(ptr, ptrv);
261 // NOTE: It's recommended to compile EF coding with -msse4.2, starting
262 // with Nehalem, Intel CPUs support POPCNT instruction and gcc will emit
263 // it for __builtin_popcountll intrinsic.
264 // But we provide an alternative way for the client code: it can switch to
265 // the appropriate version of EliasFanoReader<> in realtime (client should
266 // implement this switching logic itself) by specifying instruction set to
// Instruction-set policy classes selectable at runtime by the client.
// NOTE(review): lossy paste -- the "struct Default {" header (original line
// ~270), several "return true;"/closing-brace lines, and the Fast::popcount
// result declaration/return are missing from this chunk.
268 namespace instructions {
// Default: portable builtins only; supported() presumably returns true
// unconditionally (its body line is missing) -- confirm against upstream.
271 static bool supported() {
274 static inline uint64_t popcount(uint64_t value) {
275 return __builtin_popcountll(value);
277 static inline int ctz(uint64_t value) {
279 return __builtin_ctzll(value);
// Fast: overrides popcount with the raw POPCNT instruction; only valid when
// supported() (a CpuId-based check, see cpuId.popcnt() below) returns true.
283 struct Fast : public Default {
284 static bool supported() {
286 return cpuId.popcnt();
288 static inline uint64_t popcount(uint64_t value) {
// Inline asm emits popcntq directly, independent of compiler flags.
290 asm ("popcntq %1, %0" : "=r" (result) : "r" (value));
295 } // namespace instructions
// Streaming reader over the unary-encoded upper-bits sequence, consuming it
// 64 bits (one block_t) at a time. Lives in namespace detail (the opening
// "namespace detail {" line is missing from this lossy paste; only its
// closing brace at the bottom is visible).
299 template <class Encoder, class Instructions>
300 class UpperBitsReader {
301 typedef typename Encoder::SkipValueType SkipValueType;
303 typedef typename Encoder::ValueType ValueType;
// Positions start at -1 ("before the first element"); first next() lands on 0.
305 explicit UpperBitsReader(const EliasFanoCompressedList& list)
306 : forwardPointers_(list.forwardPointers.data()),
307 skipPointers_(list.skipPointers.data()),
308 start_(list.upper.data()),
309 block_(start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0),
310 outer_(0), // outer offset: number of consumed bytes in upper.
311 inner_(-1), // inner offset: (bit) position in current block.
312 position_(-1), // index of current value (= #reads - 1).
315 size_t position() const { return position_; }
316 ValueType value() const { return value_; }
// Body of next() (its "ValueType next() {" header, original line ~318, is
// missing): advance to the next 1-bit and return the decoded upper value.
319 // Skip to the first non-zero block.
320 while (block_ == 0) {
321 outer_ += sizeof(block_t);
322 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// ctz finds the next 1-bit; clearing it (x &= x-1) consumes it.
326 inner_ = Instructions::ctz(block_);
327 block_ &= block_ - 1;
// Advance by n 1-bits (n-th next() in one step), using forward pointers
// to jump over whole quanta when available.
332 ValueType skip(size_t n) {
335 position_ += n; // n 1-bits will be read.
337 // Use forward pointer.
338 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
339 // Workaround to avoid 'division by zero' compile-time error.
340 constexpr size_t q = Encoder::forwardQuantum ?: 1;
342 const size_t steps = position_ / q;
// (The "const SkipValueType dest =" line 343 is missing from this paste.)
344 folly::loadUnaligned<SkipValueType>(
345 forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
347 /* static */ if (Encoder::version > 0) {
// v1: dest is a 0-bit count; bit position = dest + steps * q 1-bits.
348 reposition(dest + steps * q);
// v0 else-branch (line ~350, reposition(dest)) is missing here.
352 n = position_ + 1 - steps * q; // n is > 0.
353 // correct inner_ will be set at the end.
357 // Find necessary block.
// (The "size_t cnt;" declaration, line ~356, is missing from this paste.)
358 while ((cnt = Instructions::popcount(block_)) < n) {
// Each fully-skipped block reduces the remaining 1-bit count by cnt
// (the "n -= cnt;" line 359 is missing here).
360 outer_ += sizeof(block_t);
361 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
364 // NOTE: Trying to skip half-block here didn't show any
365 // performance improvements.
369 // Kill n - 1 least significant 1-bits.
370 for (size_t i = 0; i < n - 1; ++i) {
371 block_ &= block_ - 1;
// Land on the n-th 1-bit of the current block and consume it.
374 inner_ = Instructions::ctz(block_);
375 block_ &= block_ - 1;
380 // Skip to the first element that is >= v and located *after* the current
381 // one (so even if current value equals v, position will be increased by 1).
382 ValueType skipToNext(ValueType v) {
383 DCHECK_GE(v, value_);
// Use skip pointers to jump over 0-bit quanta when v is far ahead.
386 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
387 // Workaround to avoid 'division by zero' compile-time error.
388 constexpr size_t q = Encoder::skipQuantum ?: 1;
390 const size_t steps = v / q;
// (The "const SkipValueType dest =" line ~391 is missing from this paste.)
392 folly::loadUnaligned<SkipValueType>(
393 skipPointers_ + (steps - 1) * sizeof(SkipValueType));
395 /* static */ if (Encoder::version > 0) {
// v1: dest = number of preceding 1-bits; bit position adds q*steps 0-bits.
396 reposition(dest + q * steps);
397 position_ = dest - 1;
// v0 else-branch (reposition(dest), missing line ~399):
400 position_ = dest - q * steps - 1;
402 // Correct inner_ and value_ will be set during the next()
405 // NOTE: Corresponding block of lower bits sequence may be
406 // prefetched here (via __builtin_prefetch), but experiments
407 // didn't show any significant improvements.
// Remaining 0-bits to consume before reaching value v.
412 size_t skip = v - (8 * outer_ - position_ - 1);
414 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
// Swallow whole blocks while they hold fewer than `skip` 0-bits
// (popcount of ~block_ counts the 0-bits of block_).
415 while ((cnt = Instructions::popcount(~block_)) < skip) {
417 position_ += kBitsPerBlock - cnt;
418 outer_ += sizeof(block_t);
419 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
422 // Try to skip half-block.
423 constexpr size_t kBitsPerHalfBlock = 4 * sizeof(block_t);
424 constexpr block_t halfBlockMask = (block_t(1) << kBitsPerHalfBlock) - 1;
425 if ((cnt = Instructions::popcount(~block_ & halfBlockMask)) < skip) {
426 position_ += kBitsPerHalfBlock - cnt;
427 block_ &= ~halfBlockMask;
430 // Just skip until we see expected value.
431 while (next() < v) { }
// Decode the value at the current bit position: total bits consumed minus
// the number of 1-bits consumed gives the count of preceding 0-bits.
436 ValueType setValue() {
437 value_ = static_cast<ValueType>(8 * outer_ + inner_ - position_);
// Jump the cursor to absolute bit offset dest (outer_ assignment on the
// missing line ~442); mask off bits of the block below dest % 8.
// NOTE(review): the mask uses dest % 8, not dest % 64 -- matches upstream,
// valid because outer_ is set to the byte containing dest.
441 void reposition(size_t dest) {
443 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
444 block_ &= ~((block_t(1) << (dest % 8)) - 1);
447 typedef unsigned long long block_t;
448 const unsigned char* const forwardPointers_;
449 const unsigned char* const skipPointers_;
450 const unsigned char* const start_;
// (block_/outer_/inner_/position_/value_ member declarations, original
// lines 451-455, are missing from this chunk.)
458 } // namespace detail
// Reader for lists produced by EliasFanoEncoder: merges the bit-packed lower
// part with the unary upper part (via detail::UpperBitsReader).
// NOTE(review): lossy paste -- constructor init-list lines (list_/upper_/
// progress_/value_ initializers), the "ValueType next() {" header, several
// return statements and closing braces are missing from this chunk.
460 template <class Encoder,
461 class Instructions = instructions::Default>
462 class EliasFanoReader : private boost::noncopyable {
464 typedef typename Encoder::ValueType ValueType;
466 explicit EliasFanoReader(const EliasFanoCompressedList& list)
468 lowerMask_((ValueType(1) << list_.numLowerBits) - 1),
472 DCHECK(Instructions::supported());
473 // To avoid extra branching during skipTo() while reading
474 // upper sequence we need to know the last element.
475 if (UNLIKELY(list_.size == 0)) {
// Upper value of the last element = index of the last 1-bit minus the
// number of 1-bits before it; start from total bits minus size...
479 ValueType lastUpperValue = 8 * list_.upper.size() - list_.size;
480 auto it = list_.upper.end() - 1;
// ...then subtract the unused high bits of the final byte. (The loop line
// skipping trailing zero bytes, original line ~481, is missing here.)
482 lastUpperValue -= 8 - folly::findLastSet(*it);
483 lastValue_ = readLowerPart(list_.size - 1) |
484 (lastUpperValue << list_.numLowerBits);
487 size_t size() const { return list_.size; }
// progress_ counts elements already read, so current index is progress_ - 1.
489 size_t position() const { return progress_ - 1; }
490 ValueType value() const { return value_; }
// Body of next() (header line ~492 missing): saturate to max() once the
// list is exhausted, otherwise combine lower and upper parts.
493 if (UNLIKELY(progress_ == list_.size)) {
494 value_ = std::numeric_limits<ValueType>::max();
497 value_ = readLowerPart(progress_) |
498 (upper_.next() << list_.numLowerBits);
// Advance by n elements; returns false (presumably -- return lines are
// missing) when skipping past the end.
503 bool skip(size_t n) {
507 if (LIKELY(progress_ < list_.size)) {
508 value_ = readLowerPart(progress_) |
509 (upper_.skip(n) << list_.numLowerBits);
// Past-the-end: park at size with sentinel max() value.
514 progress_ = list_.size;
515 value_ = std::numeric_limits<ValueType>::max();
// Advance to the first element >= value (value must be non-decreasing
// across calls, per the DCHECK).
519 bool skipTo(ValueType value) {
520 DCHECK_GE(value, value_);
521 if (value <= value_) {
// lastValue_ lets us bail out without scanning the upper sequence.
524 if (value > lastValue_) {
525 progress_ = list_.size;
526 value_ = std::numeric_limits<ValueType>::max();
// Coarse jump on the upper bits, then linear refinement below.
530 upper_.skipToNext(value >> list_.numLowerBits);
531 progress_ = upper_.position();
532 value_ = readLowerPart(progress_) |
533 (upper_.value() << list_.numLowerBits);
535 while (value_ < value) {
536 value_ = readLowerPart(progress_) |
537 (upper_.next() << list_.numLowerBits);
// Extract the i-th numLowerBits-wide field from the packed lower array.
// Relies on the 7 readable slack bytes past the end (see encoder WARNING).
545 ValueType readLowerPart(size_t i) const {
546 const size_t pos = i * list_.numLowerBits;
547 const unsigned char* ptr = list_.lower.data() + (pos / 8);
548 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
549 return lowerMask_ & (ptrv >> (pos % 8));
552 const EliasFanoCompressedList list_;
553 const ValueType lowerMask_;
554 detail::UpperBitsReader<Encoder, Instructions> upper_;
// (progress_ and value_ member declarations, lines ~555-556, are missing.)
557 ValueType lastValue_;
562 #endif // FOLLY_EXPERIMENTAL_ELIAS_FANO_CODING_H