/*
- * Copyright 2013 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* limitations under the License.
*/
-#include "folly/io/IOBufQueue.h"
+#include <folly/io/IOBufQueue.h>
#include <string.h>
using folly::IOBuf;
const size_t MIN_ALLOC_SIZE = 2000;
-const size_t MAX_ALLOC_SIZE = 8000; // Must fit within a uint32_t
+const size_t MAX_ALLOC_SIZE = 8000;
const size_t MAX_PACK_COPY = 4096;
/**
// reduce wastage (the tail's tailroom and the head's headroom) when
// joining two IOBufQueues together.
size_t copyRemaining = MAX_PACK_COPY;
- uint32_t n;
+ uint64_t n;
while (src &&
(n = src->length()) < copyRemaining &&
n < tail->tailroom()) {
}
}
-} // anonymous namespace
+} // namespace
namespace folly {
/**
 * Construct an empty queue. The writable-range cache pointer starts out
 * referring to the queue's own local cache, which is marked attached.
 */
IOBufQueue::IOBufQueue(const Options& options)
    : options_(options), cachePtr_(&localCache_) {
  localCache_.attached = true;
}
-IOBufQueue::IOBufQueue(IOBufQueue&& other)
- : options_(other.options_),
- chainLength_(other.chainLength_),
- head_(std::move(other.head_)) {
+IOBufQueue::~IOBufQueue() {
+ clearWritableRangeCache();
+}
+
+IOBufQueue::IOBufQueue(IOBufQueue&& other) noexcept
+ : options_(other.options_), cachePtr_(&localCache_) {
+ other.clearWritableRangeCache();
+ head_ = std::move(other.head_);
+ chainLength_ = other.chainLength_;
+
+ tailStart_ = other.tailStart_;
+ localCache_.cachedRange = other.localCache_.cachedRange;
+ localCache_.attached = true;
+
other.chainLength_ = 0;
+ other.tailStart_ = nullptr;
+ other.localCache_.cachedRange = {nullptr, nullptr};
}
/**
 * Move assignment. Both queues' writable-range caches are cleared before any
 * state moves, so neither side carries stale cached tail pointers across the
 * transfer. Self-assignment is a no-op.
 */
IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) {
  if (this != &other) {
    other.clearWritableRangeCache();
    clearWritableRangeCache();

    // Take over the other queue's chain and bookkeeping.
    options_ = other.options_;
    head_ = std::move(other.head_);
    chainLength_ = other.chainLength_;
    tailStart_ = other.tailStart_;
    localCache_.cachedRange = other.localCache_.cachedRange;
    localCache_.attached = true;

    // Reset the moved-from queue to an empty state.
    other.chainLength_ = 0;
    other.tailStart_ = nullptr;
    other.localCache_.cachedRange = {nullptr, nullptr};
  }
  return *this;
}
-std::pair<void*, uint32_t>
+std::pair<void*, uint64_t>
IOBufQueue::headroom() {
+ // Note, headroom is independent from the tail, so we don't need to flush the
+ // cache.
if (head_) {
return std::make_pair(head_->writableBuffer(), head_->headroom());
} else {
}
void
-IOBufQueue::markPrepended(uint32_t n) {
+IOBufQueue::markPrepended(uint64_t n) {
if (n == 0) {
return;
}
+ // Note, headroom is independent from the tail, so we don't need to flush the
+ // cache.
assert(head_);
head_->prepend(n);
- if (options_.cacheChainLength) {
- chainLength_ += n;
- }
+ chainLength_ += n;
}
void
-IOBufQueue::prepend(const void* buf, uint32_t n) {
- auto p = headroom();
- if (n > p.second) {
+IOBufQueue::prepend(const void* buf, uint64_t n) {
+ // We're not touching the tail, so we don't need to flush the cache.
+ auto hroom = head_->headroom();
+ if (!head_ || hroom < n) {
throw std::overflow_error("Not enough room to prepend");
}
- memcpy(static_cast<char*>(p.first) + p.second - n, buf, n);
- markPrepended(n);
+ memcpy(head_->writableBuffer() + hroom - n, buf, n);
+ head_->prepend(n);
+ chainLength_ += n;
}
void
if (!buf) {
return;
}
+ auto guard = updateGuard();
if (options_.cacheChainLength) {
chainLength_ += buf->computeChainDataLength();
}
if (!other.head_) {
return;
}
+ // We're going to chain other, thus we need to grab both guards.
+ auto otherGuard = other.updateGuard();
+ auto guard = updateGuard();
if (options_.cacheChainLength) {
if (other.options_.cacheChainLength) {
chainLength_ += other.chainLength_;
void
IOBufQueue::append(const void* buf, size_t len) {
+ auto guard = updateGuard();
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
if ((head_ == nullptr) || head_->prev()->isSharedOne() ||
(head_->prev()->tailroom() == 0)) {
- appendToChain(head_, std::move(
+ appendToChain(head_,
IOBuf::create(std::max(MIN_ALLOC_SIZE,
- std::min(len, MAX_ALLOC_SIZE)))),
+ std::min(len, MAX_ALLOC_SIZE))),
false);
}
IOBuf* last = head_->prev();
- uint32_t copyLen = std::min(len, (size_t)last->tailroom());
+ uint64_t copyLen = std::min(len, (size_t)last->tailroom());
memcpy(last->writableTail(), src, copyLen);
src += copyLen;
last->append(copyLen);
- if (options_.cacheChainLength) {
- chainLength_ += copyLen;
- }
+ chainLength_ += copyLen;
len -= copyLen;
}
}
void
-IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) {
+IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) {
auto src = static_cast<const uint8_t*>(buf);
while (len != 0) {
size_t n = std::min(len, size_t(blockSize));
}
}
-pair<void*,uint32_t>
-IOBufQueue::preallocate(uint32_t min, uint32_t newAllocationSize,
- uint32_t max) {
- if (head_ != nullptr) {
- // If there's enough space left over at the end of the queue, use that.
- IOBuf* last = head_->prev();
- if (!last->isSharedOne()) {
- uint32_t avail = last->tailroom();
- if (avail >= min) {
- return make_pair(last->writableTail(), std::min(max, avail));
- }
- }
- }
+pair<void*,uint64_t>
+IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize,
+ uint64_t max) {
+ // Avoid grabbing update guard, since we're manually setting the cache ptrs.
+ flushCache();
// Allocate a new buffer of the requested max size.
unique_ptr<IOBuf> newBuf(IOBuf::create(std::max(min, newAllocationSize)));
- appendToChain(head_, std::move(newBuf), false);
- IOBuf* last = head_->prev();
- return make_pair(last->writableTail(),
- std::min(max, last->tailroom()));
-}
-void
-IOBufQueue::postallocate(uint32_t n) {
- head_->prev()->append(n);
- if (options_.cacheChainLength) {
- chainLength_ += n;
- }
+ tailStart_ = newBuf->writableTail();
+ cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>(
+ tailStart_, tailStart_ + newBuf->tailroom());
+ appendToChain(head_, std::move(newBuf), false);
+ return make_pair(writableTail(), std::min<uint64_t>(max, tailroom()));
}
-unique_ptr<IOBuf>
-IOBufQueue::split(size_t n) {
+unique_ptr<IOBuf> IOBufQueue::split(size_t n, bool throwOnUnderflow) {
+ auto guard = updateGuard();
unique_ptr<IOBuf> result;
while (n != 0) {
if (head_ == nullptr) {
- throw std::underflow_error(
- "Attempt to remove more bytes than are present in IOBufQueue");
+ if (throwOnUnderflow) {
+ throw std::underflow_error(
+ "Attempt to remove more bytes than are present in IOBufQueue");
+ } else {
+ break;
+ }
} else if (head_->length() <= n) {
n -= head_->length();
- if (options_.cacheChainLength) {
- chainLength_ -= head_->length();
- }
+ chainLength_ -= head_->length();
unique_ptr<IOBuf> remainder = head_->pop();
appendToChain(result, std::move(head_), false);
head_ = std::move(remainder);
clone->trimEnd(clone->length() - n);
appendToChain(result, std::move(clone), false);
head_->trimStart(n);
- if (options_.cacheChainLength) {
- chainLength_ -= n;
- }
+ chainLength_ -= n;
break;
}
}
- return std::move(result);
+ if (UNLIKELY(result == nullptr)) {
+ return IOBuf::create(0);
+ }
+ return result;
}
void IOBufQueue::trimStart(size_t amount) {
+ auto trimmed = trimStartAtMost(amount);
+ if (trimmed != amount) {
+ throw std::underflow_error(
+ "Attempt to trim more bytes than are present in IOBufQueue");
+ }
+}
+
+size_t IOBufQueue::trimStartAtMost(size_t amount) {
+ auto guard = updateGuard();
+ auto original = amount;
while (amount > 0) {
if (!head_) {
- throw std::underflow_error(
- "Attempt to trim more bytes than are present in IOBufQueue");
+ break;
}
if (head_->length() > amount) {
head_->trimStart(amount);
- if (options_.cacheChainLength) {
- chainLength_ -= amount;
- }
+ chainLength_ -= amount;
+ amount = 0;
break;
}
amount -= head_->length();
- if (options_.cacheChainLength) {
- chainLength_ -= head_->length();
- }
+ chainLength_ -= head_->length();
head_ = head_->pop();
}
+ return original - amount;
}
void IOBufQueue::trimEnd(size_t amount) {
+ auto trimmed = trimEndAtMost(amount);
+ if (trimmed != amount) {
+ throw std::underflow_error(
+ "Attempt to trim more bytes than are present in IOBufQueue");
+ }
+}
+
+size_t IOBufQueue::trimEndAtMost(size_t amount) {
+ auto guard = updateGuard();
+ auto original = amount;
while (amount > 0) {
if (!head_) {
- throw std::underflow_error(
- "Attempt to trim more bytes than are present in IOBufQueue");
+ break;
}
if (head_->prev()->length() > amount) {
head_->prev()->trimEnd(amount);
- if (options_.cacheChainLength) {
- chainLength_ -= amount;
- }
+ chainLength_ -= amount;
+ amount = 0;
break;
}
amount -= head_->prev()->length();
- if (options_.cacheChainLength) {
- chainLength_ -= head_->prev()->length();
- }
+ chainLength_ -= head_->prev()->length();
if (head_->isChained()) {
head_->prev()->unlink();
head_.reset();
}
}
+ return original - amount;
}
std::unique_ptr<folly::IOBuf> IOBufQueue::pop_front() {
+ auto guard = updateGuard();
if (!head_) {
return nullptr;
}
- if (options_.cacheChainLength) {
- chainLength_ -= head_->length();
- }
+ chainLength_ -= head_->length();
std::unique_ptr<folly::IOBuf> retBuf = std::move(head_);
head_ = retBuf->pop();
return retBuf;
}
-} // folly
+void IOBufQueue::clear() {
+ if (!head_) {
+ return;
+ }
+ auto guard = updateGuard();
+ IOBuf* buf = head_.get();
+ do {
+ buf->clear();
+ buf = buf->next();
+ } while (buf != head_.get());
+ chainLength_ = 0;
+}
+
+void IOBufQueue::appendToString(std::string& out) const {
+ if (!head_) {
+ return;
+ }
+ auto len = options_.cacheChainLength
+ ? chainLength_ + (cachePtr_->cachedRange.first - tailStart_)
+ : head_->computeChainDataLength() +
+ (cachePtr_->cachedRange.first - tailStart_);
+ out.reserve(out.size() + len);
+
+ for (auto range : *head_) {
+ out.append(reinterpret_cast<const char*>(range.data()), range.size());
+ }
+
+ if (tailStart_ != cachePtr_->cachedRange.first) {
+ out.append(
+ reinterpret_cast<const char*>(tailStart_),
+ cachePtr_->cachedRange.first - tailStart_);
+ }
+}
+
+void IOBufQueue::gather(uint64_t maxLength) {
+ auto guard = updateGuard();
+ if (head_ != nullptr) {
+ head_->gather(maxLength);
+ }
+}
+
+} // namespace folly