/*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#pragma once
-#ifndef FOLLY_CURSOR_H
-#define FOLLY_CURSOR_H
-
-#include <assert.h>
+#include <cassert>
#include <cstdarg>
+#include <cstring>
+#include <memory>
#include <stdexcept>
-#include <string.h>
#include <type_traits>
-#include <memory>
-#include <folly/Bits.h>
-#include <folly/io/IOBuf.h>
-#include <folly/io/IOBufQueue.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/Portability.h>
#include <folly/Range.h>
+#include <folly/io/IOBuf.h>
+#include <folly/io/IOBufQueue.h>
+#include <folly/lang/Bits.h>
+#include <folly/portability/BitsFunctexcept.h>
/**
* Cursor class for fast iteration over IOBuf chains.
*
* RWPrivateCursor - Read-write access, assumes private access to IOBuf chain
* RWUnshareCursor - Read-write access, calls unshare on write (COW)
- * Appender - Write access, assumes private access to IOBuf chian
+ * Appender - Write access, assumes private access to IOBuf chain
*
* Note that RW cursors write in the preallocated part of buffers (that is,
 * between the buffer's data() and tail()), while Appenders append to the end
 * of the buffer (between the buffer's tail() and bufferEnd()).  Appenders
 * automatically adjust the buffer pointers, so you may only use one
 * Appender with a buffer chain; for this reason, Appenders assume private
* access to the buffer (you need to call unshare() yourself if necessary).
**/
-namespace folly { namespace io {
+namespace folly {
+namespace io {
+
namespace detail {
-template <class Derived, typename BufType>
+template <class Derived, class BufType>
class CursorBase {
+ // Make all the templated classes friends for copy constructor.
+ template <class D, typename B> friend class CursorBase;
public:
+ explicit CursorBase(BufType* buf) : crtBuf_(buf), buffer_(buf) {
+ if (crtBuf_) {
+ crtPos_ = crtBegin_ = crtBuf_->data();
+ crtEnd_ = crtBuf_->tail();
+ }
+ }
+
+ /**
+ * Copy constructor.
+ *
+ * This also allows constructing a CursorBase from other derived types.
+ * For instance, this allows constructing a Cursor from an RWPrivateCursor.
+ */
+ template <class OtherDerived, class OtherBuf>
+ explicit CursorBase(const CursorBase<OtherDerived, OtherBuf>& cursor)
+ : crtBuf_(cursor.crtBuf_),
+ buffer_(cursor.buffer_),
+ crtBegin_(cursor.crtBegin_),
+ crtEnd_(cursor.crtEnd_),
+ crtPos_(cursor.crtPos_),
+ absolutePos_(cursor.absolutePos_) {}
+
+ /**
+ * Reset cursor to point to a new buffer.
+ */
+ void reset(BufType* buf) {
+ crtBuf_ = buf;
+ buffer_ = buf;
+ absolutePos_ = 0;
+ if (crtBuf_) {
+ crtPos_ = crtBegin_ = crtBuf_->data();
+ crtEnd_ = crtBuf_->tail();
+ }
+ }
+
+ /**
+ * Get the current Cursor position relative to the head of IOBuf chain.
+ */
+ size_t getCurrentPosition() const {
+ return (crtPos_ - crtBegin_) + absolutePos_;
+ }
+
const uint8_t* data() const {
- return crtBuf_->data() + offset_;
+ return crtPos_;
}
- /*
+ /**
* Return the remaining space available in the current IOBuf.
*
- * May return 0 if the cursor is at the end of an IOBuf. Use peek() instead
- * if you want to avoid this. peek() will advance to the next non-empty
- * IOBuf (up to the end of the chain) if the cursor is currently pointing at
- * the end of a buffer.
+ * May return 0 if the cursor is at the end of an IOBuf. Use peekBytes()
+ * instead if you want to avoid this. peekBytes() will advance to the next
+ * non-empty IOBuf (up to the end of the chain) if the cursor is currently
+ * pointing at the end of a buffer.
*/
size_t length() const {
- return crtBuf_->length() - offset_;
+ return crtEnd_ - crtPos_;
}
- /*
+ /**
* Return the space available until the end of the entire IOBuf chain.
*/
size_t totalLength() const {
if (crtBuf_ == buffer_) {
- return crtBuf_->computeChainDataLength() - offset_;
+ return crtBuf_->computeChainDataLength() - (crtPos_ - crtBegin_);
}
CursorBase end(buffer_->prev());
- end.offset_ = end.buffer_->length();
+ end.crtPos_ = end.crtEnd_;
return end - *this;
}
+ /**
+ * Return true if the cursor could advance the specified number of bytes
+ * from its current position.
+ * This is useful for applications that want to do checked reads instead of
+ * catching exceptions and is more efficient than using totalLength as it
+ * walks the minimal set of buffers in the chain to determine the result.
+ */
+ bool canAdvance(size_t amount) const {
+ const IOBuf* nextBuf = crtBuf_;
+ size_t available = length();
+ do {
+ if (available >= amount) {
+ return true;
+ }
+ amount -= available;
+ nextBuf = nextBuf->next();
+ available = nextBuf->length();
+ } while (nextBuf != buffer_);
+ return false;
+ }
+
+ /*
+ * Return true if the cursor is at the end of the entire IOBuf chain.
+ */
+ bool isAtEnd() const {
+ // Check for the simple cases first.
+ if (crtPos_ != crtEnd_) {
+ return false;
+ }
+ if (crtBuf_ == buffer_->prev()) {
+ return true;
+ }
+ // We are at the end of a buffer, but it isn't the last buffer.
+ // We might still be at the end if the remaining buffers in the chain are
+ // empty.
+    const IOBuf* buf = crtBuf_->next();
+ while (buf != buffer_) {
+ if (buf->length() > 0) {
+ return false;
+ }
+ buf = buf->next();
+ }
+ return true;
+ }
+
+ /**
+ * Advances the cursor to the end of the entire IOBuf chain.
+ */
+ void advanceToEnd() {
+ // Simple case, we're already in the last IOBuf.
+ if (crtBuf_ == buffer_->prev()) {
+ crtPos_ = crtEnd_;
+ return;
+ }
+
+ auto* nextBuf = crtBuf_->next();
+ while (nextBuf != buffer_) {
+ absolutePos_ += crtEnd_ - crtBegin_;
+
+ crtBuf_ = nextBuf;
+ nextBuf = crtBuf_->next();
+ crtBegin_ = crtBuf_->data();
+ crtPos_ = crtEnd_ = crtBuf_->tail();
+
+ static_cast<Derived*>(this)->advanceDone();
+ }
+ }
+
Derived& operator+=(size_t offset) {
Derived* p = static_cast<Derived*>(this);
p->skip(offset);
    return *p;
}
+ Derived& operator-=(size_t offset) {
+ Derived* p = static_cast<Derived*>(this);
+ p->retreat(offset);
+ return *p;
+ }
+ Derived operator-(size_t offset) const {
+ Derived other(*this);
+ other.retreat(offset);
+ return other;
+ }
+
/**
* Compare cursors for equality/inequality.
*
 * Two cursors are equal if they are pointing to the same location in the
 * same IOBuf chain.
*/
bool operator==(const Derived& other) const {
- return (offset_ == other.offset_) && (crtBuf_ == other.crtBuf_);
+ const IOBuf* crtBuf = crtBuf_;
+ auto crtPos = crtPos_;
+ // We can be pointing to the end of a buffer chunk, find first non-empty.
+ while (crtPos == crtBuf->tail() && crtBuf != buffer_->prev()) {
+ crtBuf = crtBuf->next();
+ crtPos = crtBuf->data();
+ }
+
+ const IOBuf* crtBufOther = other.crtBuf_;
+ auto crtPosOther = other.crtPos_;
+ // We can be pointing to the end of a buffer chunk, find first non-empty.
+ while (crtPosOther == crtBufOther->tail() &&
+ crtBufOther != other.buffer_->prev()) {
+ crtBufOther = crtBufOther->next();
+ crtPosOther = crtBufOther->data();
+ }
+ return (crtPos == crtPosOther) && (crtBuf == crtBufOther);
}
bool operator!=(const Derived& other) const {
return !operator==(other);
}
template <class T>
- typename std::enable_if<std::is_arithmetic<T>::value, T>::type
- read() {
- T val;
- pull(&val, sizeof(T));
- return val;
+ typename std::enable_if<std::is_arithmetic<T>::value, bool>::type tryRead(
+ T& val) {
+ if (LIKELY(crtPos_ + sizeof(T) <= crtEnd_)) {
+ val = loadUnaligned<T>(data());
+ crtPos_ += sizeof(T);
+ return true;
+ }
+ return pullAtMostSlow(&val, sizeof(T)) == sizeof(T);
+ }
+
+ template <class T>
+ bool tryReadBE(T& val) {
+ const bool result = tryRead(val);
+ val = Endian::big(val);
+ return result;
+ }
+
+ template <class T>
+ bool tryReadLE(T& val) {
+ const bool result = tryRead(val);
+ val = Endian::little(val);
+ return result;
+ }
+
+ template <class T>
+ T read() {
+ if (LIKELY(crtPos_ + sizeof(T) <= crtEnd_)) {
+ T val = loadUnaligned<T>(data());
+ crtPos_ += sizeof(T);
+ return val;
+ } else {
+ return readSlow<T>();
+ }
}
template <class T>
*/
std::string readFixedString(size_t len) {
std::string str;
-
str.reserve(len);
- for (;;) {
- // Fast path: it all fits in one buffer.
- size_t available = length();
- if (LIKELY(available >= len)) {
- str.append(reinterpret_cast<const char*>(data()), len);
- offset_ += len;
- return str;
- }
-
- str.append(reinterpret_cast<const char*>(data()), available);
- if (UNLIKELY(!tryAdvanceBuffer())) {
- throw std::out_of_range("string underflow");
- }
- len -= available;
+ if (LIKELY(length() >= len)) {
+ str.append(reinterpret_cast<const char*>(data()), len);
+ crtPos_ += len;
+ } else {
+ readFixedStringSlow(&str, len);
}
+ return str;
}
/**
* vs. using pull().
*/
std::string readTerminatedString(
- char termChar = '\0',
- size_t maxLength = std::numeric_limits<size_t>::max()) {
- std::string str;
+ char termChar = '\0',
+ size_t maxLength = std::numeric_limits<size_t>::max());
- for (;;) {
- const uint8_t* buf = data();
- size_t buflen = length();
-
- size_t i = 0;
- while (i < buflen && buf[i] != termChar) {
- ++i;
+ /*
+ * Read all bytes until the specified predicate returns true.
+ *
+ * The predicate will be called on each byte in turn, until it returns false
+ * or until the end of the IOBuf chain is reached.
+ *
+ * Returns the result as a string.
+ */
+ template <typename Predicate>
+ std::string readWhile(const Predicate& predicate);
- // Do this check after incrementing 'i', as even though we start at the
- // 0 byte, it still represents a single character
- if (str.length() + i >= maxLength) {
- throw std::length_error("string overflow");
- }
- }
+ /*
+ * Read all bytes until the specified predicate returns true.
+ *
+ * This is a more generic version of readWhile() takes an arbitrary Output
+ * object, and calls Output::append() with each chunk of matching data.
+ */
+ template <typename Predicate, typename Output>
+ void readWhile(const Predicate& predicate, Output& out);
- str.append(reinterpret_cast<const char*>(buf), i);
- if (i < buflen) {
- skip(i + 1);
- return str;
- }
+ /*
+ * Skip all bytes until the specified predicate returns true.
+ *
+ * The predicate will be called on each byte in turn, until it returns false
+ * or until the end of the IOBuf chain is reached.
+ */
+ template <typename Predicate>
+ void skipWhile(const Predicate& predicate);
- skip(i);
+ size_t skipAtMost(size_t len) {
+ if (LIKELY(crtPos_ + len < crtEnd_)) {
+ crtPos_ += len;
+ return len;
+ }
+ return skipAtMostSlow(len);
+ }
- if (UNLIKELY(!tryAdvanceBuffer())) {
- throw std::out_of_range("string underflow");
- }
+ void skip(size_t len) {
+ if (LIKELY(crtPos_ + len < crtEnd_)) {
+ crtPos_ += len;
+ } else {
+ skipSlow(len);
}
}
- explicit CursorBase(BufType* buf)
- : crtBuf_(buf)
- , offset_(0)
- , buffer_(buf) {}
+ /**
+ * Skip bytes in the current IOBuf without advancing to the next one.
+ * Precondition: length() >= len
+ */
+ void skipNoAdvance(size_t len) {
+ DCHECK_LE(len, length());
+ crtPos_ += len;
+ }
- // Make all the templated classes friends for copy constructor.
- template <class D, typename B> friend class CursorBase;
+ size_t retreatAtMost(size_t len) {
+ if (len <= static_cast<size_t>(crtPos_ - crtBegin_)) {
+ crtPos_ -= len;
+ return len;
+ }
+ return retreatAtMostSlow(len);
+ }
- /*
- * Copy constructor.
- *
- * This also allows constructing a CursorBase from other derived types.
- * For instance, this allows constructing a Cursor from an RWPrivateCursor.
- */
- template <class OtherDerived, class OtherBuf>
- explicit CursorBase(const CursorBase<OtherDerived, OtherBuf>& cursor)
- : crtBuf_(cursor.crtBuf_),
- offset_(cursor.offset_),
- buffer_(cursor.buffer_) {}
+ void retreat(size_t len) {
+ if (len <= static_cast<size_t>(crtPos_ - crtBegin_)) {
+ crtPos_ -= len;
+ } else {
+ retreatSlow(len);
+ }
+ }
- // reset cursor to point to a new buffer.
- void reset(BufType* buf) {
- crtBuf_ = buf;
- buffer_ = buf;
- offset_ = 0;
+ size_t pullAtMost(void* buf, size_t len) {
+ // Fast path: it all fits in one buffer.
+ if (LIKELY(crtPos_ + len <= crtEnd_)) {
+ memcpy(buf, data(), len);
+ crtPos_ += len;
+ return len;
+ }
+ return pullAtMostSlow(buf, len);
+ }
+
+ void pull(void* buf, size_t len) {
+ if (LIKELY(crtPos_ + len <= crtEnd_)) {
+ memcpy(buf, data(), len);
+ crtPos_ += len;
+ } else {
+ pullSlow(buf, len);
+ }
}
/**
* Return the available data in the current buffer.
* If you want to gather more data from the chain into a contiguous region
- * (for hopefully zero-copy access), use gather() before peek().
+ * (for hopefully zero-copy access), use gather() before peekBytes().
*/
- std::pair<const uint8_t*, size_t> peek() {
+ ByteRange peekBytes() {
// Ensure that we're pointing to valid data
size_t available = length();
while (UNLIKELY(available == 0 && tryAdvanceBuffer())) {
available = length();
}
-
- return std::make_pair(data(), available);
+ return ByteRange{data(), available};
}
/**
- * Read one byte. Equivalent to pull(&byte, 1) but faster.
+ * Alternate version of peekBytes() that returns a std::pair
+ * instead of a ByteRange. (This method pre-dates ByteRange.)
+ *
+ * This function will eventually be deprecated.
*/
- uint8_t pullByte() {
- // fast path
- if (LIKELY(length() >= 1)) {
- uint8_t byte = *data();
- offset_++;
- return byte;
- }
-
- // slow path
- uint8_t byte;
- if (UNLIKELY(pullAtMost(&byte, 1) != 1)) {
- throw std::out_of_range("underflow");
- }
- return byte;
- }
-
- void pull(void* buf, size_t len) {
- if (UNLIKELY(pullAtMost(buf, len) != len)) {
- throw std::out_of_range("underflow");
- }
+ std::pair<const uint8_t*, size_t> peek() {
+ auto bytes = peekBytes();
+ return std::make_pair(bytes.data(), bytes.size());
}
void clone(std::unique_ptr<folly::IOBuf>& buf, size_t len) {
if (UNLIKELY(cloneAtMost(buf, len) != len)) {
- throw std::out_of_range("underflow");
+ std::__throw_out_of_range("underflow");
}
}
void clone(folly::IOBuf& buf, size_t len) {
if (UNLIKELY(cloneAtMost(buf, len) != len)) {
- throw std::out_of_range("underflow");
- }
- }
-
- void skip(size_t len) {
- if (LIKELY(length() >= len)) {
- offset_ += len;
- return;
- }
- skipSlow(len);
- }
-
- size_t pullAtMost(void* buf, size_t len) {
- uint8_t* p = reinterpret_cast<uint8_t*>(buf);
- size_t copied = 0;
- for (;;) {
- // Fast path: it all fits in one buffer.
- size_t available = length();
- if (LIKELY(available >= len)) {
- memcpy(p, data(), len);
- offset_ += len;
- return copied + len;
- }
-
- memcpy(p, data(), available);
- copied += available;
- if (UNLIKELY(!tryAdvanceBuffer())) {
- return copied;
- }
- p += available;
- len -= available;
+ std::__throw_out_of_range("underflow");
}
}
size_t cloneAtMost(folly::IOBuf& buf, size_t len) {
- buf = folly::IOBuf();
+ // We might be at the end of buffer.
+ advanceBufferIfEmpty();
std::unique_ptr<folly::IOBuf> tmp;
size_t copied = 0;
if (LIKELY(available >= len)) {
if (loopCount == 0) {
crtBuf_->cloneOneInto(buf);
- buf.trimStart(offset_);
+ buf.trimStart(crtPos_ - crtBegin_);
buf.trimEnd(buf.length() - len);
} else {
tmp = crtBuf_->cloneOne();
- tmp->trimStart(offset_);
+ tmp->trimStart(crtPos_ - crtBegin_);
tmp->trimEnd(tmp->length() - len);
buf.prependChain(std::move(tmp));
}
- offset_ += len;
+ crtPos_ += len;
+ advanceBufferIfEmpty();
return copied + len;
}
-
if (loopCount == 0) {
crtBuf_->cloneOneInto(buf);
- buf.trimStart(offset_);
+ buf.trimStart(crtPos_ - crtBegin_);
} else {
tmp = crtBuf_->cloneOne();
- tmp->trimStart(offset_);
+ tmp->trimStart(crtPos_ - crtBegin_);
buf.prependChain(std::move(tmp));
}
size_t cloneAtMost(std::unique_ptr<folly::IOBuf>& buf, size_t len) {
if (!buf) {
- buf = make_unique<folly::IOBuf>();
+ buf = std::make_unique<folly::IOBuf>();
}
-
return cloneAtMost(*buf, len);
}
- size_t skipAtMost(size_t len) {
- size_t skipped = 0;
- for (;;) {
- // Fast path: it all fits in one buffer.
- size_t available = length();
- if (LIKELY(available >= len)) {
- offset_ += len;
- return skipped + len;
- }
-
- skipped += available;
- if (UNLIKELY(!tryAdvanceBuffer())) {
- return skipped;
- }
- len -= available;
- }
- }
-
/**
* Return the distance between two cursors.
*/
size_t len = 0;
if (otherBuf != crtBuf_) {
- len += otherBuf->length() - other.offset_;
+ len += other.crtEnd_ - other.crtPos_;
for (otherBuf = otherBuf->next();
otherBuf != crtBuf_ && otherBuf != other.buffer_;
}
if (otherBuf == other.buffer_) {
- throw std::out_of_range("wrap-around");
+ std::__throw_out_of_range("wrap-around");
}
- len += offset_;
+ len += crtPos_ - crtBegin_;
} else {
- if (offset_ < other.offset_) {
- throw std::out_of_range("underflow");
+ if (crtPos_ < other.crtPos_) {
+ std::__throw_out_of_range("underflow");
}
- len += offset_ - other.offset_;
+ len += crtPos_ - other.crtPos_;
}
return len;
size_t operator-(const BufType* buf) const {
size_t len = 0;
- BufType *curBuf = buf;
+ const BufType* curBuf = buf;
while (curBuf != crtBuf_) {
len += curBuf->length();
curBuf = curBuf->next();
if (curBuf == buf || curBuf == buffer_) {
- throw std::out_of_range("wrap-around");
+ std::__throw_out_of_range("wrap-around");
}
}
- len += offset_;
+ len += crtPos_ - crtBegin_;
return len;
}
protected:
- BufType* crtBuf_;
- size_t offset_;
-
- ~CursorBase(){}
+ ~CursorBase() { }
BufType* head() {
return buffer_;
bool tryAdvanceBuffer() {
BufType* nextBuf = crtBuf_->next();
if (UNLIKELY(nextBuf == buffer_)) {
- offset_ = crtBuf_->length();
+ crtPos_ = crtEnd_;
return false;
}
- offset_ = 0;
+ absolutePos_ += crtEnd_ - crtBegin_;
crtBuf_ = nextBuf;
+ crtPos_ = crtBegin_ = crtBuf_->data();
+ crtEnd_ = crtBuf_->tail();
+ static_cast<Derived*>(this)->advanceDone();
+ return true;
+ }
+
+ bool tryRetreatBuffer() {
+ if (UNLIKELY(crtBuf_ == buffer_)) {
+ crtPos_ = crtBegin_;
+ return false;
+ }
+ crtBuf_ = crtBuf_->prev();
+ crtBegin_ = crtBuf_->data();
+ crtPos_ = crtEnd_ = crtBuf_->tail();
+ absolutePos_ -= crtEnd_ - crtBegin_;
static_cast<Derived*>(this)->advanceDone();
return true;
}
+ void advanceBufferIfEmpty() {
+ if (crtPos_ == crtEnd_) {
+ tryAdvanceBuffer();
+ }
+ }
+
+ BufType* crtBuf_;
+ BufType* buffer_;
+ const uint8_t* crtBegin_{nullptr};
+ const uint8_t* crtEnd_{nullptr};
+ const uint8_t* crtPos_{nullptr};
+ size_t absolutePos_{0};
+
private:
- void advanceDone() {
+ template <class T>
+ FOLLY_NOINLINE T readSlow() {
+ T val;
+ pullSlow(&val, sizeof(T));
+ return val;
+ }
+
+ void readFixedStringSlow(std::string* str, size_t len) {
+ for (size_t available; (available = length()) < len; ) {
+ str->append(reinterpret_cast<const char*>(data()), available);
+ if (UNLIKELY(!tryAdvanceBuffer())) {
+ std::__throw_out_of_range("string underflow");
+ }
+ len -= available;
+ }
+ str->append(reinterpret_cast<const char*>(data()), len);
+ crtPos_ += len;
+ advanceBufferIfEmpty();
+ }
+
+ size_t pullAtMostSlow(void* buf, size_t len) {
+ uint8_t* p = reinterpret_cast<uint8_t*>(buf);
+ size_t copied = 0;
+ for (size_t available; (available = length()) < len; ) {
+ memcpy(p, data(), available);
+ copied += available;
+ if (UNLIKELY(!tryAdvanceBuffer())) {
+ return copied;
+ }
+ p += available;
+ len -= available;
+ }
+ memcpy(p, data(), len);
+ crtPos_ += len;
+ advanceBufferIfEmpty();
+ return copied + len;
+ }
+
+ void pullSlow(void* buf, size_t len) {
+ if (UNLIKELY(pullAtMostSlow(buf, len) != len)) {
+ std::__throw_out_of_range("underflow");
+ }
+ }
+
+ size_t skipAtMostSlow(size_t len) {
+ size_t skipped = 0;
+ for (size_t available; (available = length()) < len; ) {
+ skipped += available;
+ if (UNLIKELY(!tryAdvanceBuffer())) {
+ return skipped;
+ }
+ len -= available;
+ }
+ crtPos_ += len;
+ advanceBufferIfEmpty();
+ return skipped + len;
}
void skipSlow(size_t len) {
- if (UNLIKELY(skipAtMost(len) != len)) {
- throw std::out_of_range("underflow");
+ if (UNLIKELY(skipAtMostSlow(len) != len)) {
+ std::__throw_out_of_range("underflow");
}
}
- BufType* buffer_;
+ size_t retreatAtMostSlow(size_t len) {
+ size_t retreated = 0;
+ for (size_t available; (available = crtPos_ - crtBegin_) < len;) {
+ retreated += available;
+ if (UNLIKELY(!tryRetreatBuffer())) {
+ return retreated;
+ }
+ len -= available;
+ }
+ crtPos_ -= len;
+ return retreated + len;
+ }
+
+ void retreatSlow(size_t len) {
+ if (UNLIKELY(retreatAtMostSlow(len) != len)) {
+ std::__throw_out_of_range("underflow");
+ }
+ }
+
+ void advanceDone() {
+ }
};
-} //namespace detail
+} // namespace detail
class Cursor : public detail::CursorBase<Cursor, const IOBuf> {
public:
void push(const uint8_t* buf, size_t len) {
Derived* d = static_cast<Derived*>(this);
if (d->pushAtMost(buf, len) != len) {
- throw std::out_of_range("overflow");
+ std::__throw_out_of_range("overflow");
}
}
void push(ByteRange buf) {
if (this->pushAtMost(buf) != buf.size()) {
- throw std::out_of_range("overflow");
+ std::__throw_out_of_range("overflow");
}
}
*/
void push(Cursor cursor, size_t len) {
if (this->pushAtMost(cursor, len) != len) {
- throw std::out_of_range("overflow");
+ std::__throw_out_of_range("overflow");
}
}
size_t pushAtMost(Cursor cursor, size_t len) {
size_t written = 0;
for(;;) {
- auto currentBuffer = cursor.peek();
- const uint8_t* crtData = currentBuffer.first;
- size_t available = currentBuffer.second;
+ auto currentBuffer = cursor.peekBytes();
+ const uint8_t* crtData = currentBuffer.data();
+ size_t available = currentBuffer.size();
if (available == 0) {
// end of buffer chain
return written;
if (this->crtBuf_ != this->head() && this->totalLength() < n) {
throw std::overflow_error("cannot gather() past the end of the chain");
}
- this->crtBuf_->gather(this->offset_ + n);
+ size_t offset = this->crtPos_ - this->crtBegin_;
+ this->crtBuf_->gather(offset + n);
+ this->crtBegin_ = this->crtBuf_->data();
+ this->crtEnd_ = this->crtBuf_->tail();
+ this->crtPos_ = this->crtBegin_ + offset;
}
void gatherAtMost(size_t n) {
size_t size = std::min(n, this->totalLength());
- return this->crtBuf_->gather(this->offset_ + size);
+ size_t offset = this->crtPos_ - this->crtBegin_;
+ this->crtBuf_->gather(offset + size);
+ this->crtBegin_ = this->crtBuf_->data();
+ this->crtEnd_ = this->crtBuf_->tail();
+ this->crtPos_ = this->crtBegin_ + offset;
}
using detail::Writable<RWCursor<access>>::pushAtMost;
size_t pushAtMost(const uint8_t* buf, size_t len) {
+ // We have to explicitly check for an input length of 0.
+ // We support buf being nullptr in this case, but we need to avoid calling
+ // memcpy() with a null source pointer, since that is undefined behavior
+ // even if the length is 0.
+ if (len == 0) {
+ return 0;
+ }
+
size_t copied = 0;
for (;;) {
// Fast path: the current buffer is big enough.
maybeUnshare();
}
memcpy(writableData(), buf, len);
- this->offset_ += len;
+ this->crtPos_ += len;
return copied + len;
}
}
void insert(std::unique_ptr<folly::IOBuf> buf) {
- folly::IOBuf* nextBuf;
- if (this->offset_ == 0) {
+ this->absolutePos_ += buf->computeChainDataLength();
+ if (this->crtPos_ == this->crtBegin_ && this->crtBuf_ != this->buffer_) {
// Can just prepend
- nextBuf = this->crtBuf_;
this->crtBuf_->prependChain(std::move(buf));
} else {
+ IOBuf* nextBuf;
std::unique_ptr<folly::IOBuf> remaining;
- if (this->crtBuf_->length() - this->offset_ > 0) {
+ if (this->crtPos_ != this->crtEnd_) {
// Need to split current IOBuf in two.
remaining = this->crtBuf_->cloneOne();
- remaining->trimStart(this->offset_);
+ remaining->trimStart(this->crtPos_ - this->crtBegin_);
nextBuf = remaining.get();
buf->prependChain(std::move(remaining));
} else {
nextBuf = this->crtBuf_->next();
}
this->crtBuf_->trimEnd(this->length());
+ this->absolutePos_ += this->crtPos_ - this->crtBegin_;
this->crtBuf_->appendChain(std::move(buf));
+
+ // Jump past the new links
+ this->crtBuf_ = nextBuf;
+ this->crtPos_ = this->crtBegin_ = this->crtBuf_->data();
+ this->crtEnd_ = this->crtBuf_->tail();
}
- // Jump past the new links
- this->offset_ = 0;
- this->crtBuf_ = nextBuf;
}
uint8_t* writableData() {
- return this->crtBuf_->writableData() + this->offset_;
+ return this->crtBuf_->writableData() + (this->crtPos_ - this->crtBegin_);
}
private:
void maybeUnshare() {
if (UNLIKELY(maybeShared_)) {
+ size_t offset = this->crtPos_ - this->crtBegin_;
this->crtBuf_->unshareOne();
+ this->crtBegin_ = this->crtBuf_->data();
+ this->crtEnd_ = this->crtBuf_->tail();
+ this->crtPos_ = this->crtBegin_ + offset;
maybeShared_ = false;
}
}
// Waste the rest of the current buffer and allocate a new one.
// Don't make it too small, either.
if (growth_ == 0) {
- throw std::out_of_range("can't grow buffer chain");
+ std::__throw_out_of_range("can't grow buffer chain");
}
n = std::max(n, growth_);
using detail::Writable<Appender>::pushAtMost;
size_t pushAtMost(const uint8_t* buf, size_t len) {
+ // We have to explicitly check for an input length of 0.
+ // We support buf being nullptr in this case, but we need to avoid calling
+ // memcpy() with a null source pointer, since that is undefined behavior
+ // even if the length is 0.
+ if (len == 0) {
+ return 0;
+ }
+
size_t copied = 0;
for (;;) {
// Fast path: it all fits in one buffer.
* space in the queue, we grow no more than growth bytes at once
* (unless you call ensure() with a bigger value yourself).
*/
- QueueAppender(IOBufQueue* queue, uint64_t growth) {
- reset(queue, growth);
- }
+ QueueAppender(IOBufQueue* queue, uint64_t growth)
+ : queueCache_(queue), growth_(growth) {}
void reset(IOBufQueue* queue, uint64_t growth) {
- queue_ = queue;
+ queueCache_.reset(queue);
growth_ = growth;
}
uint8_t* writableData() {
- return static_cast<uint8_t*>(queue_->writableTail());
+ return queueCache_.writableData();
}
- size_t length() const { return queue_->tailroom(); }
+ size_t length() {
+ return queueCache_.length();
+ }
- void append(size_t n) { queue_->postallocate(n); }
+ void append(size_t n) {
+ queueCache_.append(n);
+ }
// Ensure at least n contiguous; can go above growth_, throws if
// not enough room.
- void ensure(uint64_t n) { queue_->preallocate(n, growth_); }
+ void ensure(size_t n) {
+ if (length() < n) {
+ ensureSlow(n);
+ }
+ }
template <class T>
- typename std::enable_if<std::is_arithmetic<T>::value>::type
- write(T value) {
+ typename std::enable_if<std::is_arithmetic<T>::value>::type write(T value) {
// We can't fail.
- auto p = queue_->preallocate(sizeof(T), growth_);
- storeUnaligned(p.first, value);
- queue_->postallocate(sizeof(T));
+ if (length() >= sizeof(T)) {
+ storeUnaligned(queueCache_.writableData(), value);
+ queueCache_.appendUnsafe(sizeof(T));
+ } else {
+ writeSlow<T>(value);
+ }
}
using detail::Writable<QueueAppender>::pushAtMost;
size_t pushAtMost(const uint8_t* buf, size_t len) {
- size_t remaining = len;
+ // Fill the current buffer
+ const size_t copyLength = std::min(len, length());
+ if (copyLength != 0) {
+ memcpy(writableData(), buf, copyLength);
+ queueCache_.appendUnsafe(copyLength);
+ buf += copyLength;
+ }
+ size_t remaining = len - copyLength;
+ // Allocate more buffers as necessary
while (remaining != 0) {
- auto p = queue_->preallocate(std::min(remaining, growth_),
- growth_,
- remaining);
+ auto p = queueCache_.queue()->preallocate(
+ std::min(remaining, growth_), growth_, remaining);
memcpy(p.first, buf, p.second);
- queue_->postallocate(p.second);
+ queueCache_.queue()->postallocate(p.second);
buf += p.second;
remaining -= p.second;
}
-
return len;
}
void insert(std::unique_ptr<folly::IOBuf> buf) {
if (buf) {
- queue_->append(std::move(buf), true);
+ queueCache_.queue()->append(std::move(buf), true);
}
}
+ void insert(const folly::IOBuf& buf) {
+ insert(buf.clone());
+ }
+
private:
- folly::IOBufQueue* queue_;
- size_t growth_;
+ folly::IOBufQueue::WritableRangeCache queueCache_{nullptr};
+ size_t growth_{0};
+
+ FOLLY_NOINLINE void ensureSlow(size_t n) {
+ queueCache_.queue()->preallocate(n, growth_);
+ queueCache_.fillCache();
+ }
+
+ template <class T>
+ typename std::enable_if<std::is_arithmetic<T>::value>::type FOLLY_NOINLINE
+ writeSlow(T value) {
+ queueCache_.queue()->preallocate(sizeof(T), growth_);
+ queueCache_.fillCache();
+
+ storeUnaligned(queueCache_.writableData(), value);
+ queueCache_.appendUnsafe(sizeof(T));
+ }
};
-}} // folly::io
+} // namespace io
+} // namespace folly
-#endif // FOLLY_CURSOR_H
+#include <folly/io/Cursor-inl.h>