/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @author Philip Pronin (philipp@fb.com)
 *
 * Based on the paper by Sebastiano Vigna,
 * "Quasi-succinct indices" (arxiv:1206.4300).
 */
#ifndef FOLLY_EXPERIMENTAL_ELIAS_FANO_CODING_H
#define FOLLY_EXPERIMENTAL_ELIAS_FANO_CODING_H

#ifndef __GNUC__
#error EliasFanoCoding.h requires GCC
#endif

#if !defined(__x86_64__)
#error EliasFanoCoding.h requires x86_64
#endif

#include <algorithm>
#include <cstdlib>
#include <limits>
#include <type_traits>

#include <boost/noncopyable.hpp>
#include <glog/logging.h>

#include "folly/Bits.h"
#include "folly/CpuId.h"
#include "folly/Likely.h"
#include "folly/Range.h"

#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
#error EliasFanoCoding.h requires little endianness
#endif
51 namespace folly { namespace compression {
53 struct EliasFanoCompressedList {
54 EliasFanoCompressedList()
55 : size(0), numLowerBits(0) { }
58 ::free(const_cast<unsigned char*>(lower.data()));
59 ::free(const_cast<unsigned char*>(upper.data()));
60 ::free(const_cast<unsigned char*>(skipPointers.data()));
61 ::free(const_cast<unsigned char*>(forwardPointers.data()));
67 // WARNING: EliasFanoCompressedList has no ownership of
68 // lower, upper, skipPointers and forwardPointers.
69 // The 7 bytes following the last byte of lower and upper
70 // sequences should be readable.
71 folly::ByteRange lower;
72 folly::ByteRange upper;
74 folly::ByteRange skipPointers;
75 folly::ByteRange forwardPointers;
79 // In version 1 skip / forward pointers encoding has been changed,
80 // so SkipValue = uint32_t is able to address up to ~4B elements,
81 // instead of only ~2B.
82 template <class Value,
83 class SkipValue = size_t,
84 size_t kSkipQuantum = 0, // 0 = disabled
85 size_t kForwardQuantum = 0, // 0 = disabled
87 struct EliasFanoEncoder {
88 static_assert(std::is_integral<Value>::value &&
89 std::is_unsigned<Value>::value,
90 "Value should be unsigned integral");
92 typedef EliasFanoCompressedList CompressedList;
94 typedef Value ValueType;
95 typedef SkipValue SkipValueType;
97 static constexpr size_t skipQuantum = kSkipQuantum;
98 static constexpr size_t forwardQuantum = kForwardQuantum;
99 static constexpr size_t version = kVersion;
101 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
102 if (size == 0 || upperBound < size) {
105 // floor(log(upperBound / size));
106 return folly::findLastSet(upperBound / size) - 1;
109 // WARNING: encode() mallocates lower, upper, skipPointers
110 // and forwardPointers. As EliasFanoCompressedList has
111 // no ownership of them, you need to call free() explicitly.
112 static void encode(const ValueType* list, size_t size,
113 EliasFanoCompressedList& result) {
114 encode(list, list + size, result);
117 // Range (begin, end) should be sorted.
118 template <class RandomAccessIterator>
119 static void encode(RandomAccessIterator begin,
120 RandomAccessIterator end,
121 EliasFanoCompressedList& result) {
122 CHECK(std::is_sorted(begin, end));
125 const size_t size = end - begin;
128 result = EliasFanoCompressedList();
132 const ValueType upperBound = list[size - 1];
133 uint8_t numLowerBits = defaultNumLowerBits(upperBound, size);
135 // This is detail::writeBits56 limitation.
136 numLowerBits = std::min<uint8_t>(numLowerBits, 56);
137 CHECK_LT(numLowerBits, 8 * sizeof(Value)); // As we shift by numLowerBits.
139 // WARNING: Current read/write logic assumes that the 7 bytes
140 // following the last byte of lower and upper sequences are
141 // readable (stored value doesn't matter and won't be changed),
142 // so we allocate additional 7B, but do not include them in size
143 // of returned value.
146 const size_t lowerSize = (numLowerBits * size + 7) / 8;
147 unsigned char* lower = nullptr;
148 if (lowerSize > 0) { // numLowerBits != 0
149 lower = static_cast<unsigned char*>(calloc(lowerSize + 7, 1));
150 const ValueType lowerMask = (ValueType(1) << numLowerBits) - 1;
151 for (size_t i = 0; i < size; ++i) {
152 const ValueType lowerBits = list[i] & lowerMask;
153 writeBits56(lower, i * numLowerBits, numLowerBits, lowerBits);
158 // Upper bits are stored using unary delta encoding.
159 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
160 const size_t upperSizeBits =
161 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
163 const size_t upperSize = (upperSizeBits + 7) / 8;
164 unsigned char* const upper =
165 static_cast<unsigned char*>(calloc(upperSize + 7, 1));
166 for (size_t i = 0; i < size; ++i) {
167 const ValueType upperBits = list[i] >> numLowerBits;
168 const size_t pos = upperBits + i; // upperBits 0-bits and (i + 1) 1-bits.
169 upper[pos / 8] |= 1U << (pos % 8);
172 // *** Skip pointers.
173 // Store (1-indexed) position of every skipQuantum-th
174 // 0-bit in upper bits sequence.
175 SkipValueType* skipPointers = nullptr;
176 size_t numSkipPointers = 0;
177 /* static */ if (skipQuantum != 0) {
178 // Workaround to avoid 'division by zero' compile-time error.
179 constexpr size_t q = skipQuantum ?: 1;
180 /* static */ if (kVersion > 0) {
181 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
183 CHECK_LT(upperSizeBits, std::numeric_limits<SkipValueType>::max());
185 // 8 * upperSize is used here instead of upperSizeBits, as that is
186 // more serialization-friendly way (upperSizeBits isn't known outside of
187 // this function, unlike upperSize; thus numSkipPointers could easily be
188 // deduced from upperSize).
189 numSkipPointers = (8 * upperSize - size) / q;
190 skipPointers = static_cast<SkipValueType*>(
193 : calloc(numSkipPointers, sizeof(SkipValueType)));
195 for (size_t i = 0, pos = 0; i < size; ++i) {
196 const ValueType upperBits = list[i] >> numLowerBits;
197 for (; (pos + 1) * q <= upperBits; ++pos) {
198 /* static */ if (kVersion > 0) {
199 // Since version 1, just the number of preceding 1-bits is stored.
200 skipPointers[pos] = i;
202 skipPointers[pos] = i + (pos + 1) * q;
208 // *** Forward pointers.
209 // Store (1-indexed) position of every forwardQuantum-th
210 // 1-bit in upper bits sequence.
211 SkipValueType* forwardPointers = nullptr;
212 size_t numForwardPointers = 0;
213 /* static */ if (forwardQuantum != 0) {
214 // Workaround to avoid 'division by zero' compile-time error.
215 constexpr size_t q = forwardQuantum ?: 1;
216 /* static */ if (kVersion > 0) {
217 CHECK_LT(upperBound >> numLowerBits,
218 std::numeric_limits<SkipValueType>::max());
220 CHECK_LT(upperSizeBits, std::numeric_limits<SkipValueType>::max());
223 numForwardPointers = size / q;
224 forwardPointers = static_cast<SkipValueType*>(
225 numForwardPointers == 0
227 : malloc(numForwardPointers * sizeof(SkipValueType)));
229 for (size_t i = q - 1, pos = 0; i < size; i += q, ++pos) {
230 const ValueType upperBits = list[i] >> numLowerBits;
231 /* static */ if (kVersion > 0) {
232 // Since version 1, just the number of preceding 0-bits is stored.
233 forwardPointers[pos] = upperBits;
235 forwardPointers[pos] = upperBits + i + 1;
242 result.numLowerBits = numLowerBits;
243 result.lower.reset(lower, lowerSize);
244 result.upper.reset(upper, upperSize);
245 result.skipPointers.reset(
246 reinterpret_cast<unsigned char*>(skipPointers),
247 numSkipPointers * sizeof(SkipValueType));
248 result.forwardPointers.reset(
249 reinterpret_cast<unsigned char*>(forwardPointers),
250 numForwardPointers * sizeof(SkipValueType));
254 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
255 static void writeBits56(unsigned char* data, size_t pos,
256 uint8_t len, uint64_t value) {
257 DCHECK_LE(uint32_t(len), 56);
258 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
259 unsigned char* const ptr = data + (pos / 8);
260 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
261 ptrv |= value << (pos % 8);
262 folly::storeUnaligned<uint64_t>(ptr, ptrv);
266 // NOTE: It's recommended to compile EF coding with -msse4.2, starting
267 // with Nehalem, Intel CPUs support POPCNT instruction and gcc will emit
268 // it for __builtin_popcountll intrinsic.
269 // But we provide an alternative way for the client code: it can switch to
270 // the appropriate version of EliasFanoReader<> in realtime (client should
271 // implement this switching logic itself) by specifying instruction set to
273 namespace instructions {
276 static bool supported() {
279 static inline uint64_t popcount(uint64_t value) {
280 return __builtin_popcountll(value);
282 static inline int ctz(uint64_t value) {
284 return __builtin_ctzll(value);
288 struct Fast : public Default {
289 static bool supported() {
291 return cpuId.popcnt();
293 static inline uint64_t popcount(uint64_t value) {
295 asm ("popcntq %1, %0" : "=r" (result) : "r" (value));
300 } // namespace instructions
304 template <class Encoder, class Instructions>
305 class UpperBitsReader {
306 typedef typename Encoder::SkipValueType SkipValueType;
308 typedef typename Encoder::ValueType ValueType;
310 explicit UpperBitsReader(const EliasFanoCompressedList& list)
311 : forwardPointers_(list.forwardPointers.data()),
312 skipPointers_(list.skipPointers.data()),
313 start_(list.upper.data()),
314 block_(start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0),
315 outer_(0), // outer offset: number of consumed bytes in upper.
316 inner_(-1), // inner offset: (bit) position in current block.
317 position_(-1), // index of current value (= #reads - 1).
320 size_t position() const { return position_; }
321 ValueType value() const { return value_; }
324 // Skip to the first non-zero block.
325 while (block_ == 0) {
326 outer_ += sizeof(block_t);
327 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
331 inner_ = Instructions::ctz(block_);
332 block_ &= block_ - 1;
337 ValueType skip(size_t n) {
340 position_ += n; // n 1-bits will be read.
342 // Use forward pointer.
343 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
344 // Workaround to avoid 'division by zero' compile-time error.
345 constexpr size_t q = Encoder::forwardQuantum ?: 1;
347 const size_t steps = position_ / q;
349 folly::loadUnaligned<SkipValueType>(
350 forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
352 /* static */ if (Encoder::version > 0) {
353 reposition(dest + steps * q);
357 n = position_ + 1 - steps * q; // n is > 0.
358 // correct inner_ will be set at the end.
362 // Find necessary block.
363 while ((cnt = Instructions::popcount(block_)) < n) {
365 outer_ += sizeof(block_t);
366 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
369 // NOTE: Trying to skip half-block here didn't show any
370 // performance improvements.
374 // Kill n - 1 least significant 1-bits.
375 for (size_t i = 0; i < n - 1; ++i) {
376 block_ &= block_ - 1;
379 inner_ = Instructions::ctz(block_);
380 block_ &= block_ - 1;
385 // Skip to the first element that is >= v and located *after* the current
386 // one (so even if current value equals v, position will be increased by 1).
387 ValueType skipToNext(ValueType v) {
388 DCHECK_GE(v, value_);
391 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
392 // Workaround to avoid 'division by zero' compile-time error.
393 constexpr size_t q = Encoder::skipQuantum ?: 1;
395 const size_t steps = v / q;
397 folly::loadUnaligned<SkipValueType>(
398 skipPointers_ + (steps - 1) * sizeof(SkipValueType));
400 /* static */ if (Encoder::version > 0) {
401 reposition(dest + q * steps);
402 position_ = dest - 1;
405 position_ = dest - q * steps - 1;
407 // Correct inner_ and value_ will be set during the next()
410 // NOTE: Corresponding block of lower bits sequence may be
411 // prefetched here (via __builtin_prefetch), but experiments
412 // didn't show any significant improvements.
417 size_t skip = v - (8 * outer_ - position_ - 1);
419 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
420 while ((cnt = Instructions::popcount(~block_)) < skip) {
422 position_ += kBitsPerBlock - cnt;
423 outer_ += sizeof(block_t);
424 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
427 // Try to skip half-block.
428 constexpr size_t kBitsPerHalfBlock = 4 * sizeof(block_t);
429 constexpr block_t halfBlockMask = (block_t(1) << kBitsPerHalfBlock) - 1;
430 if ((cnt = Instructions::popcount(~block_ & halfBlockMask)) < skip) {
431 position_ += kBitsPerHalfBlock - cnt;
432 block_ &= ~halfBlockMask;
435 // Just skip until we see expected value.
436 while (next() < v) { }
441 ValueType setValue() {
442 value_ = static_cast<ValueType>(8 * outer_ + inner_ - position_);
446 void reposition(size_t dest) {
448 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
449 block_ &= ~((block_t(1) << (dest % 8)) - 1);
452 typedef unsigned long long block_t;
453 const unsigned char* const forwardPointers_;
454 const unsigned char* const skipPointers_;
455 const unsigned char* const start_;
463 } // namespace detail
465 template <class Encoder,
466 class Instructions = instructions::Default>
467 class EliasFanoReader : private boost::noncopyable {
469 typedef typename Encoder::ValueType ValueType;
471 explicit EliasFanoReader(const EliasFanoCompressedList& list)
473 lowerMask_((ValueType(1) << list_.numLowerBits) - 1),
477 DCHECK(Instructions::supported());
478 // To avoid extra branching during skipTo() while reading
479 // upper sequence we need to know the last element.
480 if (UNLIKELY(list_.size == 0)) {
484 ValueType lastUpperValue = 8 * list_.upper.size() - list_.size;
485 auto it = list_.upper.end() - 1;
487 lastUpperValue -= 8 - folly::findLastSet(*it);
488 lastValue_ = readLowerPart(list_.size - 1) |
489 (lastUpperValue << list_.numLowerBits);
492 size_t size() const { return list_.size; }
494 size_t position() const { return progress_ - 1; }
495 ValueType value() const { return value_; }
498 if (UNLIKELY(progress_ == list_.size)) {
499 value_ = std::numeric_limits<ValueType>::max();
502 value_ = readLowerPart(progress_) |
503 (upper_.next() << list_.numLowerBits);
508 bool skip(size_t n) {
512 if (LIKELY(progress_ < list_.size)) {
513 value_ = readLowerPart(progress_) |
514 (upper_.skip(n) << list_.numLowerBits);
519 progress_ = list_.size;
520 value_ = std::numeric_limits<ValueType>::max();
524 bool skipTo(ValueType value) {
525 DCHECK_GE(value, value_);
526 if (value <= value_) {
529 if (value > lastValue_) {
530 progress_ = list_.size;
531 value_ = std::numeric_limits<ValueType>::max();
535 upper_.skipToNext(value >> list_.numLowerBits);
536 progress_ = upper_.position();
537 value_ = readLowerPart(progress_) |
538 (upper_.value() << list_.numLowerBits);
540 while (value_ < value) {
541 value_ = readLowerPart(progress_) |
542 (upper_.next() << list_.numLowerBits);
550 ValueType readLowerPart(size_t i) const {
551 const size_t pos = i * list_.numLowerBits;
552 const unsigned char* ptr = list_.lower.data() + (pos / 8);
553 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
554 return lowerMask_ & (ptrv >> (pos % 8));
557 const EliasFanoCompressedList list_;
558 const ValueType lowerMask_;
559 detail::UpperBitsReader<Encoder, Instructions> upper_;
562 ValueType lastValue_;
567 #endif // FOLLY_EXPERIMENTAL_ELIAS_FANO_CODING_H