/*
- * Copyright 2013 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* limitations under the License.
*/
-#ifndef FOLLY_IO_IOBUF_H_
-#define FOLLY_IO_IOBUF_H_
+#pragma once
#include <glog/logging.h>
#include <atomic>
#include <cinttypes>
#include <cstddef>
#include <cstring>
-#include <memory>
#include <limits>
-#include <sys/uio.h>
+#include <memory>
#include <type_traits>
#include <boost/iterator/iterator_facade.hpp>
-#include "folly/FBString.h"
-#include "folly/Range.h"
-#include "folly/FBVector.h"
+#include <folly/FBString.h>
+#include <folly/FBVector.h>
+#include <folly/Portability.h>
+#include <folly/Range.h>
+#include <folly/portability/SysUio.h>
// Ignore shadowing warnings within this file, so includers can use -Wshadow.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
+FOLLY_PUSH_WARNING
+FOLLY_GCC_DISABLE_WARNING("-Wshadow")
namespace folly {
* an IOBuf chain must be heap allocated. (All functions to add nodes to a
* chain require a std::unique_ptr<IOBuf>, which enforces this requirement.)
*
- * Additionally, no copy-constructor or assignment operator currently exists,
- * so stack-allocated IOBufs may only be moved, not copied. (Technically
- * nothing is preventing us from adding a copy constructor and assignment
- * operator. However, it seems like this would add the possibility for some
- * confusion. We would need to determine if these functions would copy just a
- * single buffer, or the entire chain.)
- *
+ * Copying IOBufs is only meaningful for the head of a chain. The entire chain
+ * is cloned; the IOBufs will become shared, and the old and new IOBufs will
+ * refer to the same underlying memory.
*
* IOBuf Sharing
* -------------
*/
namespace detail {
// Is T a unique_ptr<> to a standard-layout type?
-template <class T, class Enable=void> struct IsUniquePtrToSL
- : public std::false_type { };
-template <class T, class D>
-struct IsUniquePtrToSL<
- std::unique_ptr<T, D>,
- typename std::enable_if<std::is_standard_layout<T>::value>::type>
- : public std::true_type { };
-} // namespace detail
+template <typename T>
+struct IsUniquePtrToSL : std::false_type {};
+template <typename T, typename D>
+struct IsUniquePtrToSL<std::unique_ptr<T, D>> : std::is_standard_layout<T> {};
+} // namespace detail
class IOBuf {
public:
*
* Throws std::bad_alloc on error.
*/
- static std::unique_ptr<IOBuf> create(uint32_t capacity);
- IOBuf(CreateOp, uint32_t capacity);
+ static std::unique_ptr<IOBuf> create(uint64_t capacity);
+ IOBuf(CreateOp, uint64_t capacity);
/**
* Create a new IOBuf, using a single memory allocation to allocate space
* IOBuf object itself is also freed. (It can also be slightly wasteful in
* some cases where you clone this IOBuf and then free the original IOBuf.)
*/
- static std::unique_ptr<IOBuf> createCombined(uint32_t capacity);
+ static std::unique_ptr<IOBuf> createCombined(uint64_t capacity);
/**
* Create a new IOBuf, using separate memory allocations for the IOBuf object
* This requires two memory allocations, but saves space in the long run
* if you know that you will need to reallocate the data buffer later.
*/
- static std::unique_ptr<IOBuf> createSeparate(uint32_t capacity);
+ static std::unique_ptr<IOBuf> createSeparate(uint64_t capacity);
/**
* Allocate a new IOBuf chain with the requested total capacity, allocating
* no more than maxBufCapacity to each buffer.
*/
static std::unique_ptr<IOBuf> createChain(
- size_t totalCapacity, uint32_t maxBufCapacity);
+ size_t totalCapacity, uint64_t maxBufCapacity);
/**
* Create a new IOBuf pointing to an existing data buffer.
* On error, std::bad_alloc will be thrown. If freeOnError is true (the
* default) the buffer will be freed before throwing the error.
*/
- static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint32_t capacity,
+ static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint64_t capacity,
FreeFunction freeFn = nullptr,
void* userData = nullptr,
bool freeOnError = true) {
return takeOwnership(buf, capacity, capacity, freeFn,
userData, freeOnError);
}
- IOBuf(TakeOwnershipOp op, void* buf, uint32_t capacity,
+ IOBuf(TakeOwnershipOp op, void* buf, uint64_t capacity,
FreeFunction freeFn = nullptr, void* userData = nullptr,
bool freeOnError = true)
: IOBuf(op, buf, capacity, capacity, freeFn, userData, freeOnError) {}
- static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint32_t capacity,
- uint32_t length,
+ static std::unique_ptr<IOBuf> takeOwnership(void* buf, uint64_t capacity,
+ uint64_t length,
FreeFunction freeFn = nullptr,
void* userData = nullptr,
bool freeOnError = true);
- IOBuf(TakeOwnershipOp, void* buf, uint32_t capacity, uint32_t length,
+ IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length,
FreeFunction freeFn = nullptr, void* userData = nullptr,
bool freeOnError = true);
*
* On error, std::bad_alloc will be thrown.
*/
- static std::unique_ptr<IOBuf> wrapBuffer(const void* buf, uint32_t capacity);
+ static std::unique_ptr<IOBuf> wrapBuffer(const void* buf, uint64_t capacity);
static std::unique_ptr<IOBuf> wrapBuffer(ByteRange br) {
- CHECK_LE(br.size(), std::numeric_limits<uint32_t>::max());
return wrapBuffer(br.data(), br.size());
}
- IOBuf(WrapBufferOp op, const void* buf, uint32_t capacity);
+
+ /**
+ * Similar to wrapBuffer(), but returns IOBuf by value rather than
+ * heap-allocating it.
+ */
+ static IOBuf wrapBufferAsValue(const void* buf, uint64_t capacity);
+ static IOBuf wrapBufferAsValue(ByteRange br) {
+ return wrapBufferAsValue(br.data(), br.size());
+ }
+
+ IOBuf(WrapBufferOp op, const void* buf, uint64_t capacity);
IOBuf(WrapBufferOp op, ByteRange br);
/**
* user-supplied buffer, optionally allocating a given amount of
* headroom and tailroom.
*/
- static std::unique_ptr<IOBuf> copyBuffer(const void* buf, uint32_t size,
- uint32_t headroom=0,
- uint32_t minTailroom=0);
+ static std::unique_ptr<IOBuf> copyBuffer(const void* buf, uint64_t size,
+ uint64_t headroom=0,
+ uint64_t minTailroom=0);
static std::unique_ptr<IOBuf> copyBuffer(ByteRange br,
- uint32_t headroom=0,
- uint32_t minTailroom=0) {
- CHECK_LE(br.size(), std::numeric_limits<uint32_t>::max());
+ uint64_t headroom=0,
+ uint64_t minTailroom=0) {
return copyBuffer(br.data(), br.size(), headroom, minTailroom);
}
- IOBuf(CopyBufferOp op, const void* buf, uint32_t size,
- uint32_t headroom=0, uint32_t minTailroom=0);
+ IOBuf(CopyBufferOp op, const void* buf, uint64_t size,
+ uint64_t headroom=0, uint64_t minTailroom=0);
IOBuf(CopyBufferOp op, ByteRange br,
- uint32_t headroom=0, uint32_t minTailroom=0);
+ uint64_t headroom=0, uint64_t minTailroom=0);
/**
* Convenience function to create a new IOBuf object that copies data from a
* copyBuffer() above, with the size argument of 3.
*/
static std::unique_ptr<IOBuf> copyBuffer(const std::string& buf,
- uint32_t headroom=0,
- uint32_t minTailroom=0);
+ uint64_t headroom=0,
+ uint64_t minTailroom=0);
IOBuf(CopyBufferOp op, const std::string& buf,
- uint32_t headroom=0, uint32_t minTailroom=0)
+ uint64_t headroom=0, uint64_t minTailroom=0)
: IOBuf(op, buf.data(), buf.size(), headroom, minTailroom) {}
/**
* is empty.
*/
static std::unique_ptr<IOBuf> maybeCopyBuffer(const std::string& buf,
- uint32_t headroom=0,
- uint32_t minTailroom=0);
+ uint64_t headroom=0,
+ uint64_t minTailroom=0);
/**
* Convenience function to free a chain of IOBufs held by a unique_ptr.
/**
* Get the data length.
*/
- uint32_t length() const {
+ uint64_t length() const {
return length_;
}
*
* Returns the number of bytes in the buffer before the start of the data.
*/
- uint32_t headroom() const {
- return data_ - buffer();
+ uint64_t headroom() const {
+ return uint64_t(data_ - buffer());
}
/**
*
* Returns the number of bytes in the buffer after the end of the data.
*/
- uint32_t tailroom() const {
- return bufferEnd() - tail();
+ uint64_t tailroom() const {
+ return uint64_t(bufferEnd() - tail());
}
/**
* This returns the total usable length of the buffer. Use the length()
* method to get the length of the actual valid data in this IOBuf.
*/
- uint32_t capacity() const {
+ uint64_t capacity() const {
return capacity_;
}
* for making sure the buffer is unshared, so it will not affect other IOBufs
* that may be sharing the same underlying buffer.
*/
- void advance(uint32_t amount) {
+ void advance(uint64_t amount) {
// In debug builds, assert if there is a problem.
assert(amount <= tailroom());
* for making sure the buffer is unshared, so it will not affect other IOBufs
* that may be sharing the same underlying buffer.
*/
- void retreat(uint32_t amount) {
+ void retreat(uint64_t amount) {
// In debug builds, assert if there is a problem.
assert(amount <= headroom());
*
* This does not modify any actual data in the buffer.
*/
- void prepend(uint32_t amount) {
+ void prepend(uint64_t amount) {
DCHECK_LE(amount, headroom());
data_ -= amount;
length_ += amount;
*
* This does not modify any actual data in the buffer.
*/
- void append(uint32_t amount) {
+ void append(uint64_t amount) {
DCHECK_LE(amount, tailroom());
length_ += amount;
}
*
* This does not modify any actual data in the buffer.
*/
- void trimStart(uint32_t amount) {
+ void trimStart(uint64_t amount) {
DCHECK_LE(amount, length_);
data_ += amount;
length_ -= amount;
*
* This does not modify any actual data in the buffer.
*/
- void trimEnd(uint32_t amount) {
+ void trimEnd(uint64_t amount) {
DCHECK_LE(amount, length_);
length_ -= amount;
}
* Postcondition: headroom() >= minHeadroom, tailroom() >= minTailroom,
* the data (between data() and data() + length()) is preserved.
*/
- void reserve(uint32_t minHeadroom, uint32_t minTailroom) {
+ void reserve(uint64_t minHeadroom, uint64_t minTailroom) {
// Maybe we don't need to do anything.
if (headroom() >= minHeadroom && tailroom() >= minTailroom) {
return;
* Use isChained() if you just want to check if this IOBuf is part of a chain
* or not.
*/
- uint32_t countChainElements() const;
+ size_t countChainElements() const;
/**
* Get the length of all the data in this IOBuf chain.
}
}
+ /**
+ * Return true if all IOBufs in this chain are managed by the usual
+ * refcounting mechanism (and so the lifetime of the underlying memory
+ * can be extended by clone()).
+ */
+ bool isManaged() const {
+ const IOBuf* current = this;
+ while (true) {
+ if (!current->isManagedOne()) {
+ return false;
+ }
+ current = current->next_;
+ if (current == this) {
+ return true;
+ }
+ }
+ }
+
+ /**
+ * Return true if this IOBuf is managed by the usual refcounting mechanism
+ * (and so the lifetime of the underlying memory can be extended by
+ * cloneOne()).
+ */
+ bool isManagedOne() const {
+ return sharedInfo();
+ }
+
/**
* Return true if other IOBufs are also pointing to the buffer used by this
* IOBuf, and false otherwise.
* This only checks the current IOBuf, and not other IOBufs in the chain.
*/
bool isSharedOne() const {
- if (LIKELY(flags_ & (kFlagUserOwned | kFlagMaybeShared)) == 0) {
- return false;
+ // If this is a user-owned buffer, it is always considered shared
+ if (UNLIKELY(!sharedInfo())) {
+ return true;
}
- // If this is a user-owned buffer, it is always considered shared
- if (flags_ & kFlagUserOwned) {
+ if (UNLIKELY(sharedInfo()->externallyShared)) {
return true;
}
+ if (LIKELY(!(flags() & kFlagMaybeShared))) {
+ return false;
+ }
+
// kFlagMaybeShared is set, so we need to check the reference count.
// (Checking the reference count requires an atomic operation, which is why
// we prefer to only check kFlagMaybeShared if possible.)
- DCHECK(flags_ & kFlagMaybeShared);
- bool shared = sharedInfo_->refcount.load(std::memory_order_acquire) > 1;
+ bool shared = sharedInfo()->refcount.load(std::memory_order_acquire) > 1;
if (!shared) {
// we're the last one left
- flags_ &= ~kFlagMaybeShared;
+ clearFlags(kFlagMaybeShared);
}
return shared;
}
}
}
+ /**
+ * Mark the underlying buffers in this chain as shared with external memory
+ * management mechanism. This will make isShared() always return true.
+ *
+ * This function is not thread-safe, and only safe to call immediately after
+ * creating an IOBuf, before it has been shared with other threads.
+ */
+ void markExternallyShared();
+
+ /**
+ * Mark the underlying buffer that this IOBuf refers to as shared with
+ * external memory management mechanism. This will make isSharedOne() always
+ * return true.
+ *
+ * This function is not thread-safe, and only safe to call immediately after
+ * creating an IOBuf, before it has been shared with other threads.
+ */
+ void markExternallySharedOne() {
+ SharedInfo* info = sharedInfo();
+ if (info) {
+ info->externallyShared = true;
+ }
+ }
+
+ /**
+ * Ensure that the memory that IOBufs in this chain refer to will continue to
+ * be allocated for as long as the IOBufs of the chain (or any clone()s
+ * created from this point onwards) are alive.
+ *
+ * This only has an effect for user-owned buffers (created with the
+ * WRAP_BUFFER constructor or wrapBuffer factory function), in which case
+ * those buffers are unshared.
+ */
+ void makeManaged() {
+ if (isChained()) {
+ makeManagedChained();
+ } else {
+ makeManagedOne();
+ }
+ }
+
+ /**
+ * Ensure that the memory that this IOBuf refers to will continue to be
+ * allocated for as long as this IOBuf (or any clone()s created from this
+ * point onwards) is alive.
+ *
+ * This only has an effect for user-owned buffers (created with the
+ * WRAP_BUFFER constructor or wrapBuffer factory function), in which case
+ * those buffers are unshared.
+ */
+ void makeManagedOne() {
+ if (!isManagedOne()) {
+ // We can call the internal function directly; unmanaged implies shared.
+ unshareOneSlow();
+ }
+ }
+
/**
* Coalesce this IOBuf chain into a single buffer.
*
* in the chain.
*
* Throws std::bad_alloc on error. On error the IOBuf chain will be
- * unmodified. Throws std::overflow_error if the length of the entire chain
- * larger than can be described by a uint32_t capacity.
+ * unmodified.
*
* Returns ByteRange that points to the data IOBuf stores.
*/
*
* Throws std::bad_alloc or std::overflow_error on error. On error the IOBuf
* chain will be unmodified. Throws std::overflow_error if maxLength is
- * longer than the total chain length, or if the length of the coalesced
- * portion of the chain is larger than can be described by a uint32_t
- * capacity. (Although maxLength is uint32_t, gather() doesn't split
- * buffers, so coalescing whole buffers may result in a capacity that can't
- * be described in uint32_t.
+ * longer than the total chain length.
*
* Upon return, either enough of the chain was coalesced into a contiguous
* region, or the entire chain was coalesced. That is,
* length() >= maxLength || !isChained() is true.
*/
- void gather(uint32_t maxLength) {
+ void gather(uint64_t maxLength) {
if (!isChained() || length_ >= maxLength) {
return;
}
*/
std::unique_ptr<IOBuf> clone() const;
+ /**
+ * Similar to clone(). But returns IOBuf by value rather than heap-allocating
+ * it.
+ */
+ IOBuf cloneAsValue() const;
+
/**
* Return a new IOBuf with the same data as this IOBuf.
*
*/
std::unique_ptr<IOBuf> cloneOne() const;
+ /**
+ * Similar to cloneOne(). But returns IOBuf by value rather than
+ * heap-allocating it.
+ */
+ IOBuf cloneOneAsValue() const;
+
+ /**
+ * Return a new unchained IOBuf that may share the same data as this chain.
+ *
+ * If the IOBuf chain is not chained then the new IOBuf will point to the same
+ * underlying data buffer as the original chain. Otherwise, it will clone and
+ * coalesce the IOBuf chain.
+ *
+ * The new IOBuf will have at least as much headroom as the first IOBuf in the
+ * chain, and at least as much tailroom as the last IOBuf in the chain.
+ *
+ * Throws std::bad_alloc on error.
+ */
+ std::unique_ptr<IOBuf> cloneCoalesced() const;
+
+ /**
+ * Similar to cloneCoalesced(). But returns IOBuf by value rather than
+ * heap-allocating it.
+ */
+ IOBuf cloneCoalescedAsValue() const;
+
+ /**
+ * Similar to clone(). But use other as the head node. Other nodes in the
+ * chain (if any) will be allocated on the heap.
+ */
+ void cloneInto(IOBuf& other) const {
+ other = cloneAsValue();
+ }
+
+ /**
+ * Similar to cloneOne(). But to fill an existing IOBuf instead of a new
+ * IOBuf.
+ */
+ void cloneOneInto(IOBuf& other) const {
+ other = cloneOneAsValue();
+ }
+
/**
* Return an iovector suitable for e.g. writev()
*
*/
folly::fbvector<struct iovec> getIov() const;
+ /**
+ * Update an existing iovec array with the IOBuf data.
+ *
+ * New iovecs will be appended to the existing vector; anything already
+ * present in the vector will be left unchanged.
+ *
+ * Naturally, the returned iovec data will be invalid if you modify the
+ * buffer chain.
+ */
+ void appendToIov(folly::fbvector<struct iovec>* iov) const;
+
+ /**
+ * Fill an iovec array with the IOBuf data.
+ *
+ * Returns the number of iovecs filled. If there are more buffers than
+ * iovec, returns 0. This version is suitable to use with stack iovec
+ * arrays.
+ *
+ * Naturally, the filled iovec data will be invalid if you modify the
+ * buffer chain.
+ */
+ size_t fillIov(struct iovec* iov, size_t len) const;
+
/*
* Overridden operator new and delete.
* These perform specialized memory management to help support
* the head of an IOBuf chain or a solitary IOBuf not part of a chain. If
* the move destination is part of a chain, all other IOBufs in the chain
* will be deleted.
- *
- * (We currently don't provide a copy constructor or assignment operator.
- * The main reason is because it is not clear these operations should copy
- * the entire chain or just the single IOBuf.)
*/
IOBuf(IOBuf&& other) noexcept;
IOBuf& operator=(IOBuf&& other) noexcept;
- private:
- enum FlagsEnum : uint32_t {
- kFlagUserOwned = 0x1,
- kFlagFreeSharedInfo = 0x2,
- kFlagMaybeShared = 0x4,
- };
+ IOBuf(const IOBuf& other);
+ IOBuf& operator=(const IOBuf& other);
- // Values for the type_ field.
- // We currently don't really use this for anything, other than to have it
- // around for debugging purposes. We store it at the moment just because we
- // have the 4 extra bytes that would just be padding otherwise.
- enum ExtBufTypeEnum {
- kExtAllocated = 0,
- kExtUserSupplied = 1,
- kExtUserOwned = 2,
- kCombinedAlloc = 3,
+ private:
+ enum FlagsEnum : uintptr_t {
+ // Adding any more flags would not work on 32-bit architectures,
+ // as these flags are stashed in the least significant 2 bits of a
+ // max-align-aligned pointer.
+ kFlagFreeSharedInfo = 0x1,
+ kFlagMaybeShared = 0x2,
+ kFlagMask = kFlagFreeSharedInfo | kFlagMaybeShared
};
struct SharedInfo {
FreeFunction freeFn;
void* userData;
std::atomic<uint32_t> refcount;
+ bool externallyShared{false};
};
// Helper structs for use by operator new and delete
struct HeapPrefix;
struct HeapStorage;
struct HeapFullStorage;
- // Forbidden copy constructor and assignment opererator
- IOBuf(IOBuf const &);
- IOBuf& operator=(IOBuf const &);
-
/**
* Create a new IOBuf pointing to an external buffer.
*
* IOBuf. The IOBuf constructor does not automatically increment the
* reference count.
*/
- IOBuf(ExtBufTypeEnum type, uint32_t flags,
- uint8_t* buf, uint32_t capacity,
- uint8_t* data, uint32_t length,
- SharedInfo* sharedInfo);
+ struct InternalConstructor {}; // avoid conflicts
+ IOBuf(InternalConstructor, uintptr_t flagsAndSharedInfo,
+ uint8_t* buf, uint64_t capacity,
+ uint8_t* data, uint64_t length);
void unshareOneSlow();
void unshareChained();
+ void makeManagedChained();
void coalesceSlow();
void coalesceSlow(size_t maxLength);
// newLength must be the entire length of the buffers between this and
coalesceAndReallocate(headroom(), newLength, end, end->prev_->tailroom());
}
void decrementRefcount();
- void reserveSlow(uint32_t minHeadroom, uint32_t minTailroom);
+ void reserveSlow(uint64_t minHeadroom, uint64_t minTailroom);
void freeExtBuffer();
- static size_t goodExtBufferSize(uint32_t minCapacity);
+ static size_t goodExtBufferSize(uint64_t minCapacity);
static void initExtBuffer(uint8_t* buf, size_t mallocSize,
SharedInfo** infoReturn,
- uint32_t* capacityReturn);
- static void allocExtBuffer(uint32_t minCapacity,
+ uint64_t* capacityReturn);
+ static void allocExtBuffer(uint64_t minCapacity,
uint8_t** bufReturn,
SharedInfo** infoReturn,
- uint32_t* capacityReturn);
+ uint64_t* capacityReturn);
static void releaseStorage(HeapStorage* storage, uint16_t freeFlags);
static void freeInternalBuf(void* buf, void* userData);
*/
uint8_t* data_{nullptr};
uint8_t* buf_{nullptr};
- uint32_t length_{0};
- uint32_t capacity_{0};
- mutable uint32_t flags_{kFlagUserOwned};
- uint32_t type_{kExtUserOwned};
- // SharedInfo may be NULL if kFlagUserOwned is set. It is non-NULL
- // in all other cases.
- SharedInfo* sharedInfo_{nullptr};
+ uint64_t length_{0};
+ uint64_t capacity_{0};
+
+ // Pack flags in least significant 2 bits, sharedInfo in the rest
+ mutable uintptr_t flagsAndSharedInfo_{0};
+
+ static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags,
+ SharedInfo* info) {
+ uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
+ DCHECK_EQ(flags & ~kFlagMask, 0u);
+ DCHECK_EQ(uinfo & kFlagMask, 0u);
+ return flags | uinfo;
+ }
+
+ inline SharedInfo* sharedInfo() const {
+ return reinterpret_cast<SharedInfo*>(flagsAndSharedInfo_ & ~kFlagMask);
+ }
+
+ inline void setSharedInfo(SharedInfo* info) {
+ uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
+ DCHECK_EQ(uinfo & kFlagMask, 0u);
+ flagsAndSharedInfo_ = (flagsAndSharedInfo_ & kFlagMask) | uinfo;
+ }
+
+ inline uintptr_t flags() const {
+ return flagsAndSharedInfo_ & kFlagMask;
+ }
+
+ // flags_ are changed from const methods
+ inline void setFlags(uintptr_t flags) const {
+ DCHECK_EQ(flags & ~kFlagMask, 0u);
+ flagsAndSharedInfo_ |= flags;
+ }
+
+ inline void clearFlags(uintptr_t flags) const {
+ DCHECK_EQ(flags & ~kFlagMask, 0u);
+ flagsAndSharedInfo_ &= ~flags;
+ }
+
+ inline void setFlagsAndSharedInfo(uintptr_t flags, SharedInfo* info) {
+ flagsAndSharedInfo_ = packFlagsAndSharedInfo(flags, info);
+ }
struct DeleterBase {
virtual ~DeleterBase() { }
typedef typename UniquePtr::deleter_type Deleter;
explicit UniquePtrDeleter(Deleter deleter) : deleter_(std::move(deleter)){ }
- void dispose(void* p) {
+ void dispose(void* p) override {
try {
deleter_(static_cast<Pointer>(p));
delete this;
}
};
+/**
+ * Hasher for IOBuf objects. Hashes the entire chain using SpookyHashV2.
+ */
+struct IOBufHash {
+ size_t operator()(const IOBuf& buf) const;
+ size_t operator()(const std::unique_ptr<IOBuf>& buf) const {
+ return buf ? (*this)(*buf) : 0;
+ }
+};
+
+/**
+ * Equality predicate for IOBuf objects. Compares data in the entire chain.
+ */
+struct IOBufEqual {
+ bool operator()(const IOBuf& a, const IOBuf& b) const;
+ bool operator()(const std::unique_ptr<IOBuf>& a,
+ const std::unique_ptr<IOBuf>& b) const {
+ if (!a && !b) {
+ return true;
+ } else if (!a || !b) {
+ return false;
+ } else {
+ return (*this)(*a, *b);
+ }
+ }
+};
+
template <class UniquePtr>
typename std::enable_if<detail::IsUniquePtrToSL<UniquePtr>::value,
std::unique_ptr<IOBuf>>::type
IOBuf::takeOwnership(UniquePtr&& buf, size_t count) {
size_t size = count * sizeof(typename UniquePtr::element_type);
- DCHECK_LT(size, size_t(std::numeric_limits<uint32_t>::max()));
auto deleter = new UniquePtrDeleter<UniquePtr>(buf.get_deleter());
return takeOwnership(buf.release(),
size,
}
inline std::unique_ptr<IOBuf> IOBuf::copyBuffer(
- const void* data, uint32_t size, uint32_t headroom,
- uint32_t minTailroom) {
- uint32_t capacity = headroom + size + minTailroom;
+ const void* data, uint64_t size, uint64_t headroom,
+ uint64_t minTailroom) {
+ uint64_t capacity = headroom + size + minTailroom;
std::unique_ptr<IOBuf> buf = create(capacity);
buf->advance(headroom);
- memcpy(buf->writableData(), data, size);
+ if (size != 0) {
+ memcpy(buf->writableData(), data, size);
+ }
buf->append(size);
return buf;
}
inline std::unique_ptr<IOBuf> IOBuf::copyBuffer(const std::string& buf,
- uint32_t headroom,
- uint32_t minTailroom) {
+ uint64_t headroom,
+ uint64_t minTailroom) {
return copyBuffer(buf.data(), buf.size(), headroom, minTailroom);
}
inline std::unique_ptr<IOBuf> IOBuf::maybeCopyBuffer(const std::string& buf,
- uint32_t headroom,
- uint32_t minTailroom) {
+ uint64_t headroom,
+ uint64_t minTailroom) {
if (buf.empty()) {
return nullptr;
}
}
}
+ Iterator() {}
+
private:
void setVal() {
val_ = ByteRange(pos_->data(), pos_->tail());
adjustForEnd();
}
- const IOBuf* pos_;
- const IOBuf* end_;
+ const IOBuf* pos_{nullptr};
+ const IOBuf* end_{nullptr};
ByteRange val_;
};
inline IOBuf::Iterator IOBuf::begin() const { return cbegin(); }
inline IOBuf::Iterator IOBuf::end() const { return cend(); }
-} // folly
-
-#pragma GCC diagnostic pop
+} // namespace folly
-#endif // FOLLY_IO_IOBUF_H_
+FOLLY_POP_WARNING