From: Tudor Bosman Date: Wed, 19 Feb 2014 04:20:15 +0000 (-0800) Subject: Make IOBuf support 64-bit length and capacity X-Git-Tag: v0.22.0~692 X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=81823a9cb036baed2a3cfe5b352832e6340e6a39;p=folly.git Make IOBuf support 64-bit length and capacity Summary: Remove type_ (unused), pack flags in least significant bits of sharedInfo_. sizeof(IOBuf) remains 56 bytes. Test Plan: folly/io/test with -opt and -dbg; iobuf*_test with asan as well. Reviewed By: simpkins@fb.com FB internal diff: D1179993 --- diff --git a/folly/io/Cursor.h b/folly/io/Cursor.h index 67a86243..bb0e5fdd 100644 --- a/folly/io/Cursor.h +++ b/folly/io/Cursor.h @@ -575,7 +575,7 @@ typedef RWCursor RWUnshareCursor; */ class Appender : public detail::Writable { public: - Appender(IOBuf* buf, uint32_t growth) + Appender(IOBuf* buf, uint64_t growth) : buffer_(buf), crtBuf_(buf->prev()), growth_(growth) { @@ -601,7 +601,7 @@ class Appender : public detail::Writable { * Ensure at least n contiguous bytes available to write. * Postcondition: length() >= n. */ - void ensure(uint32_t n) { + void ensure(uint64_t n) { if (LIKELY(length() >= n)) { return; } @@ -653,7 +653,7 @@ class Appender : public detail::Writable { IOBuf* buffer_; IOBuf* crtBuf_; - uint32_t growth_; + uint64_t growth_; }; class QueueAppender : public detail::Writable { @@ -663,11 +663,11 @@ class QueueAppender : public detail::Writable { * space in the queue, we grow no more than growth bytes at once * (unless you call ensure() with a bigger value yourself). */ - QueueAppender(IOBufQueue* queue, uint32_t growth) { + QueueAppender(IOBufQueue* queue, uint64_t growth) { reset(queue, growth); } - void reset(IOBufQueue* queue, uint32_t growth) { + void reset(IOBufQueue* queue, uint64_t growth) { queue_ = queue; growth_ = growth; } @@ -682,7 +682,7 @@ class QueueAppender : public detail::Writable { // Ensure at least n contiguous; can go above growth_, throws if // not enough room. 
- void ensure(uint32_t n) { queue_->preallocate(n, growth_); } + void ensure(uint64_t n) { queue_->preallocate(n, growth_); } template typename std::enable_if::value>::type diff --git a/folly/io/IOBuf.cpp b/folly/io/IOBuf.cpp index 3bcef5bd..06bcca46 100644 --- a/folly/io/IOBuf.cpp +++ b/folly/io/IOBuf.cpp @@ -41,7 +41,7 @@ enum : uint16_t { kDataInUse = 0x02, }; -enum : uint32_t { +enum : uint64_t { // When create() is called for buffers less than kDefaultCombinedBufSize, // we allocate a single combined memory segment for the IOBuf and the data // together. See the comments for createCombined()/createSeparate() for more @@ -103,6 +103,11 @@ struct IOBuf::HeapStorage { }; struct IOBuf::HeapFullStorage { + // Make sure jemalloc allocates from the 64-byte class. Putting this here + // because HeapStorage is private so it can't be at namespace level. + static_assert(sizeof(HeapStorage) <= 64, + "IOBuf may not grow over 56 bytes!"); + HeapStorage hs; SharedInfo shared; MaxAlign align; @@ -184,19 +189,20 @@ void IOBuf::freeInternalBuf(void* buf, void* userData) { releaseStorage(storage, kDataInUse); } -IOBuf::IOBuf(CreateOp, uint32_t capacity) +IOBuf::IOBuf(CreateOp, uint64_t capacity) : next_(this), prev_(this), data_(nullptr), length_(0), - flags_(0), - type_(kExtAllocated) { - allocExtBuffer(capacity, &buf_, &sharedInfo_, &capacity_); + flagsAndSharedInfo_(0) { + SharedInfo* info; + allocExtBuffer(capacity, &buf_, &info, &capacity_); + setSharedInfo(info); data_ = buf_; } -IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint32_t size, - uint32_t headroom, uint32_t minTailroom) +IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint64_t size, + uint64_t headroom, uint64_t minTailroom) : IOBuf(CREATE, headroom + size + minTailroom) { advance(headroom); memcpy(writableData(), buf, size); @@ -204,11 +210,11 @@ IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint32_t size, } IOBuf::IOBuf(CopyBufferOp op, ByteRange br, - uint32_t headroom, uint32_t minTailroom) + uint64_t 
headroom, uint64_t minTailroom) : IOBuf(op, br.data(), br.size(), headroom, minTailroom) { } -unique_ptr IOBuf::create(uint32_t capacity) { +unique_ptr IOBuf::create(uint64_t capacity) { // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer // all with a single allocation. // @@ -224,7 +230,7 @@ unique_ptr IOBuf::create(uint32_t capacity) { return createSeparate(capacity); } -unique_ptr IOBuf::createCombined(uint32_t capacity) { +unique_ptr IOBuf::createCombined(uint64_t capacity) { // To save a memory allocation, allocate space for the IOBuf object, the // SharedInfo struct, and the data itself all with a single call to malloc(). size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity; @@ -238,17 +244,17 @@ unique_ptr IOBuf::createCombined(uint32_t capacity) { uint8_t* storageEnd = reinterpret_cast(storage) + mallocSize; size_t actualCapacity = storageEnd - bufAddr; unique_ptr ret(new (&storage->hs.buf) IOBuf( - kCombinedAlloc, 0, bufAddr, actualCapacity, - bufAddr, 0, &storage->shared)); + InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared), + bufAddr, actualCapacity, bufAddr, 0)); return ret; } -unique_ptr IOBuf::createSeparate(uint32_t capacity) { +unique_ptr IOBuf::createSeparate(uint64_t capacity) { return make_unique(CREATE, capacity); } unique_ptr IOBuf::createChain( - size_t totalCapacity, uint32_t maxBufCapacity) { + size_t totalCapacity, uint64_t maxBufCapacity) { unique_ptr out = create( std::min(totalCapacity, size_t(maxBufCapacity))); size_t allocatedCapacity = out->capacity(); @@ -263,7 +269,7 @@ unique_ptr IOBuf::createChain( return out; } -IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint32_t capacity, uint32_t length, +IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length, FreeFunction freeFn, void* userData, bool freeOnError) : next_(this), @@ -272,18 +278,17 @@ IOBuf::IOBuf(TakeOwnershipOp, void* buf, uint32_t capacity, uint32_t length, buf_(static_cast(buf)), length_(length), 
capacity_(capacity), - flags_(kFlagFreeSharedInfo), - type_(kExtUserSupplied) { + flagsAndSharedInfo_(packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) { try { - sharedInfo_ = new SharedInfo(freeFn, userData); + setSharedInfo(new SharedInfo(freeFn, userData)); } catch (...) { takeOwnershipError(freeOnError, buf, freeFn, userData); throw; } } -unique_ptr IOBuf::takeOwnership(void* buf, uint32_t capacity, - uint32_t length, +unique_ptr IOBuf::takeOwnership(void* buf, uint64_t capacity, + uint64_t length, FreeFunction freeFn, void* userData, bool freeOnError) { @@ -305,21 +310,20 @@ unique_ptr IOBuf::takeOwnership(void* buf, uint32_t capacity, } } -IOBuf::IOBuf(WrapBufferOp, const void* buf, uint32_t capacity) - : IOBuf(kExtUserOwned, kFlagUserOwned, +IOBuf::IOBuf(WrapBufferOp, const void* buf, uint64_t capacity) + : IOBuf(InternalConstructor(), 0, // We cast away the const-ness of the buffer here. // This is okay since IOBuf users must use unshare() to create a copy // of this buffer before writing to the buffer. 
static_cast(const_cast(buf)), capacity, - static_cast(const_cast(buf)), capacity, - nullptr) { + static_cast(const_cast(buf)), capacity) { } IOBuf::IOBuf(WrapBufferOp op, ByteRange br) - : IOBuf(op, br.data(), folly::to(br.size())) { + : IOBuf(op, br.data(), br.size()) { } -unique_ptr IOBuf::wrapBuffer(const void* buf, uint32_t capacity) { +unique_ptr IOBuf::wrapBuffer(const void* buf, uint64_t capacity) { return make_unique(WRAP_BUFFER, buf, capacity); } @@ -330,26 +334,21 @@ IOBuf::IOBuf(IOBuf&& other) noexcept { *this = std::move(other); } -IOBuf::IOBuf(ExtBufTypeEnum type, - uint32_t flags, +IOBuf::IOBuf(InternalConstructor, + uintptr_t flagsAndSharedInfo, uint8_t* buf, - uint32_t capacity, + uint64_t capacity, uint8_t* data, - uint32_t length, - SharedInfo* sharedInfo) + uint64_t length) : next_(this), prev_(this), data_(data), buf_(buf), length_(length), capacity_(capacity), - flags_(flags), - type_(type), - sharedInfo_(sharedInfo) { + flagsAndSharedInfo_(flagsAndSharedInfo) { assert(data >= buf); assert(data + length <= buf + capacity); - assert(static_cast(flags & kFlagUserOwned) == - (sharedInfo == NULL)); } IOBuf::~IOBuf() { @@ -381,17 +380,13 @@ IOBuf& IOBuf::operator=(IOBuf&& other) noexcept { buf_ = other.buf_; length_ = other.length_; capacity_ = other.capacity_; - flags_ = other.flags_; - type_ = other.type_; - sharedInfo_ = other.sharedInfo_; + flagsAndSharedInfo_ = other.flagsAndSharedInfo_; // Reset other so it is a clean state to be destroyed. other.data_ = nullptr; other.buf_ = nullptr; other.length_ = 0; other.capacity_ = 0; - other.flags_ = kFlagUserOwned; - other.type_ = kExtUserOwned; - other.sharedInfo_ = nullptr; + other.flagsAndSharedInfo_ = 0; // If other was part of the chain, assume ownership of the rest of its chain. // (It's only valid to perform move assignment on the head of a chain.) @@ -408,7 +403,6 @@ IOBuf& IOBuf::operator=(IOBuf&& other) noexcept { // Sanity check to make sure that other is in a valid state to be destroyed. 
DCHECK_EQ(other.prev_, &other); DCHECK_EQ(other.next_, &other); - DCHECK(other.flags_ & kFlagUserOwned); return *this; } @@ -424,8 +418,8 @@ bool IOBuf::empty() const { return true; } -uint32_t IOBuf::countChainElements() const { - uint32_t numElements = 1; +size_t IOBuf::countChainElements() const { + size_t numElements = 1; for (IOBuf* current = next_; current != this; current = current->next_) { ++numElements; } @@ -482,15 +476,15 @@ void IOBuf::cloneInto(IOBuf& other) const { } void IOBuf::cloneOneInto(IOBuf& other) const { - if (sharedInfo_) { - flags_ |= kFlagMaybeShared; + SharedInfo* info = sharedInfo(); + if (info) { + setFlags(kFlagMaybeShared); } - other = IOBuf(static_cast(type_), - flags_, buf_, capacity_, - data_, length_, - sharedInfo_); - if (sharedInfo_) { - sharedInfo_->refcount.fetch_add(1, std::memory_order_acq_rel); + other = IOBuf(InternalConstructor(), + flagsAndSharedInfo_, buf_, capacity_, + data_, length_); + if (info) { + info->refcount.fetch_add(1, std::memory_order_acq_rel); } } @@ -498,25 +492,23 @@ void IOBuf::unshareOneSlow() { // Allocate a new buffer for the data uint8_t* buf; SharedInfo* sharedInfo; - uint32_t actualCapacity; + uint64_t actualCapacity; allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity); // Copy the data // Maintain the same amount of headroom. Since we maintained the same // minimum capacity we also maintain at least the same amount of tailroom. - uint32_t headlen = headroom(); + uint64_t headlen = headroom(); memcpy(buf + headlen, data_, length_); // Release our reference on the old buffer decrementRefcount(); - // Make sure kFlagUserOwned, kFlagMaybeShared, and kFlagFreeSharedInfo - // are all cleared. - flags_ = 0; + // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared. 
+ setFlagsAndSharedInfo(0, sharedInfo); // Update the buffer pointers to point to the new buffer data_ = buf + headlen; buf_ = buf; - sharedInfo_ = sharedInfo; } void IOBuf::unshareChained() { @@ -601,7 +593,7 @@ void IOBuf::coalesceAndReallocate(size_t newHeadroom, // internal buffer before. uint8_t* newBuf; SharedInfo* newInfo; - uint32_t actualCapacity; + uint64_t actualCapacity; allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity); // Copy the data into the new buffer @@ -621,14 +613,11 @@ void IOBuf::coalesceAndReallocate(size_t newHeadroom, // Point at the new buffer decrementRefcount(); - // Make sure kFlagUserOwned, kFlagMaybeShared, and kFlagFreeSharedInfo - // are all cleared. - flags_ = 0; + // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared. + setFlagsAndSharedInfo(0, newInfo); capacity_ = actualCapacity; - type_ = kExtAllocated; buf_ = newBuf; - sharedInfo_ = newInfo; data_ = newData; length_ = newLength; @@ -643,13 +632,13 @@ void IOBuf::coalesceAndReallocate(size_t newHeadroom, void IOBuf::decrementRefcount() { // Externally owned buffers don't have a SharedInfo object and aren't managed // by the reference count - if (flags_ & kFlagUserOwned) { - assert(sharedInfo_ == nullptr); + SharedInfo* info = sharedInfo(); + if (!info) { return; } // Decrement the refcount - uint32_t newcnt = sharedInfo_->refcount.fetch_sub( + uint32_t newcnt = info->refcount.fetch_sub( 1, std::memory_order_acq_rel); // Note that fetch_sub() returns the value before we decremented. // If it is 1, we were the only remaining user; if it is greater there are @@ -671,12 +660,12 @@ void IOBuf::decrementRefcount() { // takeOwnership() store the user's free function with its allocated // SharedInfo object.) However, handling this specially with a flag seems // like it shouldn't be problematic. 
- if (flags_ & kFlagFreeSharedInfo) { - delete sharedInfo_; + if (flags() & kFlagFreeSharedInfo) { + delete sharedInfo(); } } -void IOBuf::reserveSlow(uint32_t minHeadroom, uint32_t minTailroom) { +void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) { size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom; DCHECK_LT(newCapacity, UINT32_MAX); @@ -708,13 +697,14 @@ void IOBuf::reserveSlow(uint32_t minHeadroom, uint32_t minTailroom) { size_t newAllocatedCapacity = goodExtBufferSize(newCapacity); uint8_t* newBuffer = nullptr; - uint32_t newHeadroom = 0; - uint32_t oldHeadroom = headroom(); + uint64_t newHeadroom = 0; + uint64_t oldHeadroom = headroom(); // If we have a buffer allocated with malloc and we just need more tailroom, // try to use realloc()/rallocm() to grow the buffer in place. - if ((flags_ & kFlagUserOwned) == 0 && (sharedInfo_->freeFn == nullptr) && - length_ != 0 && oldHeadroom >= minHeadroom) { + SharedInfo* info = sharedInfo(); + if (info && (info->freeFn == nullptr) && length_ != 0 && + oldHeadroom >= minHeadroom) { if (usingJEMalloc()) { size_t headSlack = oldHeadroom - minHeadroom; // We assume that tailroom is more useful and more important than @@ -767,35 +757,33 @@ void IOBuf::reserveSlow(uint32_t minHeadroom, uint32_t minTailroom) { } newBuffer = static_cast(p); memcpy(newBuffer + minHeadroom, data_, length_); - if ((flags_ & kFlagUserOwned) == 0) { + if (sharedInfo()) { freeExtBuffer(); } newHeadroom = minHeadroom; } - SharedInfo* info; - uint32_t cap; + uint64_t cap; initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); - if (flags_ & kFlagFreeSharedInfo) { - delete sharedInfo_; + if (flags() & kFlagFreeSharedInfo) { + delete sharedInfo(); } - flags_ = 0; + setFlagsAndSharedInfo(0, info); capacity_ = cap; - type_ = kExtAllocated; buf_ = newBuffer; - sharedInfo_ = info; data_ = newBuffer + newHeadroom; // length_ is unchanged } void IOBuf::freeExtBuffer() { - DCHECK((flags_ & kFlagUserOwned) == 0); + 
SharedInfo* info = sharedInfo(); + DCHECK(info); - if (sharedInfo_->freeFn) { + if (info->freeFn) { try { - sharedInfo_->freeFn(buf_, sharedInfo_->userData); + info->freeFn(buf_, info->userData); } catch (...) { // The user's free function should never throw. Otherwise we might // throw from the IOBuf destructor. Other code paths like coalesce() @@ -807,10 +795,10 @@ void IOBuf::freeExtBuffer() { } } -void IOBuf::allocExtBuffer(uint32_t minCapacity, +void IOBuf::allocExtBuffer(uint64_t minCapacity, uint8_t** bufReturn, SharedInfo** infoReturn, - uint32_t* capacityReturn) { + uint64_t* capacityReturn) { size_t mallocSize = goodExtBufferSize(minCapacity); uint8_t* buf = static_cast(malloc(mallocSize)); if (UNLIKELY(buf == NULL)) { @@ -820,7 +808,7 @@ void IOBuf::allocExtBuffer(uint32_t minCapacity, *bufReturn = buf; } -size_t IOBuf::goodExtBufferSize(uint32_t minCapacity) { +size_t IOBuf::goodExtBufferSize(uint64_t minCapacity) { // Determine how much space we should allocate. We'll store the SharedInfo // for the external buffer just after the buffer itself. (We store it just // after the buffer rather than just before so that the code can still just @@ -838,34 +826,25 @@ size_t IOBuf::goodExtBufferSize(uint32_t minCapacity) { void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize, SharedInfo** infoReturn, - uint32_t* capacityReturn) { + uint64_t* capacityReturn) { // Find the SharedInfo storage at the end of the buffer // and construct the SharedInfo. uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo); SharedInfo* sharedInfo = new(infoStart) SharedInfo; - size_t actualCapacity = infoStart - buf; - // On the unlikely possibility that the actual capacity is larger than can - // fit in a uint32_t after adding room for the refcount and calling - // goodMallocSize(), truncate downwards if necessary. 
- if (actualCapacity >= UINT32_MAX) { - *capacityReturn = UINT32_MAX; - } else { - *capacityReturn = actualCapacity; - } - + *capacityReturn = infoStart - buf; *infoReturn = sharedInfo; } fbstring IOBuf::moveToFbString() { // malloc-allocated buffers are just fine, everything else needs // to be turned into one. - if ((flags_ & kFlagUserOwned) || // user owned, not ours to give up - sharedInfo_->freeFn != nullptr || // not malloc()-ed - headroom() != 0 || // malloc()-ed block doesn't start at beginning - tailroom() == 0 || // no room for NUL terminator - isShared() || // shared - isChained()) { // chained + if (!sharedInfo() || // user owned, not ours to give up + sharedInfo()->freeFn || // not malloc()-ed + headroom() != 0 || // malloc()-ed block doesn't start at beginning + tailroom() == 0 || // no room for NUL terminator + isShared() || // shared + isChained()) { // chained // We might as well get rid of all head and tailroom if we're going // to reallocate; we need 1 byte for NUL terminator. coalesceAndReallocate(0, computeChainDataLength(), this, 1); @@ -877,13 +856,12 @@ fbstring IOBuf::moveToFbString() { length(), capacity(), AcquireMallocatedString()); - if (flags_ & kFlagFreeSharedInfo) { - delete sharedInfo_; + if (flags() & kFlagFreeSharedInfo) { + delete sharedInfo(); } // Reset to a state where we can be deleted cleanly - flags_ = kFlagUserOwned; - sharedInfo_ = nullptr; + flagsAndSharedInfo_ = 0; buf_ = nullptr; clear(); return str; diff --git a/folly/io/IOBuf.h b/folly/io/IOBuf.h index ba216061..40229343 100644 --- a/folly/io/IOBuf.h +++ b/folly/io/IOBuf.h @@ -251,8 +251,8 @@ class IOBuf { * * Throws std::bad_alloc on error. */ - static std::unique_ptr create(uint32_t capacity); - IOBuf(CreateOp, uint32_t capacity); + static std::unique_ptr create(uint64_t capacity); + IOBuf(CreateOp, uint64_t capacity); /** * Create a new IOBuf, using a single memory allocation to allocate space @@ -264,7 +264,7 @@ class IOBuf { * IOBuf object itself is also freed. 
(It can also be slightly wasteful in * some cases where you clone this IOBuf and then free the original IOBuf.) */ - static std::unique_ptr createCombined(uint32_t capacity); + static std::unique_ptr createCombined(uint64_t capacity); /** * Create a new IOBuf, using separate memory allocations for the IOBuf object @@ -273,14 +273,14 @@ class IOBuf { * This requires two memory allocations, but saves space in the long run * if you know that you will need to reallocate the data buffer later. */ - static std::unique_ptr createSeparate(uint32_t capacity); + static std::unique_ptr createSeparate(uint64_t capacity); /** * Allocate a new IOBuf chain with the requested total capacity, allocating * no more than maxBufCapacity to each buffer. */ static std::unique_ptr createChain( - size_t totalCapacity, uint32_t maxBufCapacity); + size_t totalCapacity, uint64_t maxBufCapacity); /** * Create a new IOBuf pointing to an existing data buffer. @@ -306,24 +306,24 @@ class IOBuf { * On error, std::bad_alloc will be thrown. If freeOnError is true (the * default) the buffer will be freed before throwing the error. 
*/ - static std::unique_ptr takeOwnership(void* buf, uint32_t capacity, + static std::unique_ptr takeOwnership(void* buf, uint64_t capacity, FreeFunction freeFn = nullptr, void* userData = nullptr, bool freeOnError = true) { return takeOwnership(buf, capacity, capacity, freeFn, userData, freeOnError); } - IOBuf(TakeOwnershipOp op, void* buf, uint32_t capacity, + IOBuf(TakeOwnershipOp op, void* buf, uint64_t capacity, FreeFunction freeFn = nullptr, void* userData = nullptr, bool freeOnError = true) : IOBuf(op, buf, capacity, capacity, freeFn, userData, freeOnError) {} - static std::unique_ptr takeOwnership(void* buf, uint32_t capacity, - uint32_t length, + static std::unique_ptr takeOwnership(void* buf, uint64_t capacity, + uint64_t length, FreeFunction freeFn = nullptr, void* userData = nullptr, bool freeOnError = true); - IOBuf(TakeOwnershipOp, void* buf, uint32_t capacity, uint32_t length, + IOBuf(TakeOwnershipOp, void* buf, uint64_t capacity, uint64_t length, FreeFunction freeFn = nullptr, void* userData = nullptr, bool freeOnError = true); @@ -372,12 +372,11 @@ class IOBuf { * * On error, std::bad_alloc will be thrown. */ - static std::unique_ptr wrapBuffer(const void* buf, uint32_t capacity); + static std::unique_ptr wrapBuffer(const void* buf, uint64_t capacity); static std::unique_ptr wrapBuffer(ByteRange br) { - CHECK_LE(br.size(), std::numeric_limits::max()); return wrapBuffer(br.data(), br.size()); } - IOBuf(WrapBufferOp op, const void* buf, uint32_t capacity); + IOBuf(WrapBufferOp op, const void* buf, uint64_t capacity); IOBuf(WrapBufferOp op, ByteRange br); /** @@ -385,19 +384,18 @@ class IOBuf { * user-supplied buffer, optionally allocating a given amount of * headroom and tailroom. 
*/ - static std::unique_ptr copyBuffer(const void* buf, uint32_t size, - uint32_t headroom=0, - uint32_t minTailroom=0); + static std::unique_ptr copyBuffer(const void* buf, uint64_t size, + uint64_t headroom=0, + uint64_t minTailroom=0); static std::unique_ptr copyBuffer(ByteRange br, - uint32_t headroom=0, - uint32_t minTailroom=0) { - CHECK_LE(br.size(), std::numeric_limits::max()); + uint64_t headroom=0, + uint64_t minTailroom=0) { return copyBuffer(br.data(), br.size(), headroom, minTailroom); } - IOBuf(CopyBufferOp op, const void* buf, uint32_t size, - uint32_t headroom=0, uint32_t minTailroom=0); + IOBuf(CopyBufferOp op, const void* buf, uint64_t size, + uint64_t headroom=0, uint64_t minTailroom=0); IOBuf(CopyBufferOp op, ByteRange br, - uint32_t headroom=0, uint32_t minTailroom=0); + uint64_t headroom=0, uint64_t minTailroom=0); /** * Convenience function to create a new IOBuf object that copies data from a @@ -411,10 +409,10 @@ class IOBuf { * copyBuffer() above, with the size argument of 3. */ static std::unique_ptr copyBuffer(const std::string& buf, - uint32_t headroom=0, - uint32_t minTailroom=0); + uint64_t headroom=0, + uint64_t minTailroom=0); IOBuf(CopyBufferOp op, const std::string& buf, - uint32_t headroom=0, uint32_t minTailroom=0) + uint64_t headroom=0, uint64_t minTailroom=0) : IOBuf(op, buf.data(), buf.size(), headroom, minTailroom) {} /** @@ -422,8 +420,8 @@ class IOBuf { * is empty. */ static std::unique_ptr maybeCopyBuffer(const std::string& buf, - uint32_t headroom=0, - uint32_t minTailroom=0); + uint64_t headroom=0, + uint64_t minTailroom=0); /** * Convenience function to free a chain of IOBufs held by a unique_ptr. @@ -494,7 +492,7 @@ class IOBuf { /** * Get the data length. */ - uint32_t length() const { + uint64_t length() const { return length_; } @@ -503,7 +501,7 @@ class IOBuf { * * Returns the number of bytes in the buffer before the start of the data. 
*/ - uint32_t headroom() const { + uint64_t headroom() const { return data_ - buffer(); } @@ -512,7 +510,7 @@ class IOBuf { * * Returns the number of bytes in the buffer after the end of the data. */ - uint32_t tailroom() const { + uint64_t tailroom() const { return bufferEnd() - tail(); } @@ -554,7 +552,7 @@ class IOBuf { * This returns the total usable length of the buffer. Use the length() * method to get the length of the actual valid data in this IOBuf. */ - uint32_t capacity() const { + uint64_t capacity() const { return capacity_; } @@ -593,7 +591,7 @@ class IOBuf { * for making sure the buffer is unshared, so it will not affect other IOBufs * that may be sharing the same underlying buffer. */ - void advance(uint32_t amount) { + void advance(uint64_t amount) { // In debug builds, assert if there is a problem. assert(amount <= tailroom()); @@ -614,7 +612,7 @@ class IOBuf { * for making sure the buffer is unshared, so it will not affect other IOBufs * that may be sharing the same underlying buffer. */ - void retreat(uint32_t amount) { + void retreat(uint64_t amount) { // In debug builds, assert if there is a problem. assert(amount <= headroom()); @@ -634,7 +632,7 @@ class IOBuf { * * This does not modify any actual data in the buffer. */ - void prepend(uint32_t amount) { + void prepend(uint64_t amount) { DCHECK_LE(amount, headroom()); data_ -= amount; length_ += amount; @@ -650,7 +648,7 @@ class IOBuf { * * This does not modify any actual data in the buffer. */ - void append(uint32_t amount) { + void append(uint64_t amount) { DCHECK_LE(amount, tailroom()); length_ += amount; } @@ -664,7 +662,7 @@ class IOBuf { * * This does not modify any actual data in the buffer. */ - void trimStart(uint32_t amount) { + void trimStart(uint64_t amount) { DCHECK_LE(amount, length_); data_ += amount; length_ -= amount; @@ -679,7 +677,7 @@ class IOBuf { * * This does not modify any actual data in the buffer. 
*/ - void trimEnd(uint32_t amount) { + void trimEnd(uint64_t amount) { DCHECK_LE(amount, length_); length_ -= amount; } @@ -702,7 +700,7 @@ class IOBuf { * Postcondition: headroom() >= minHeadroom, tailroom() >= minTailroom, * the data (between data() and data() + length()) is preserved. */ - void reserve(uint32_t minHeadroom, uint32_t minTailroom) { + void reserve(uint64_t minHeadroom, uint64_t minTailroom) { // Maybe we don't need to do anything. if (headroom() >= minHeadroom && tailroom() >= minTailroom) { return; @@ -734,7 +732,7 @@ class IOBuf { * Use isChained() if you just want to check if this IOBuf is part of a chain * or not. */ - uint32_t countChainElements() const; + size_t countChainElements() const; /** * Get the length of all the data in this IOBuf chain. @@ -888,23 +886,22 @@ class IOBuf { * This only checks the current IOBuf, and not other IOBufs in the chain. */ bool isSharedOne() const { - if (LIKELY(flags_ & (kFlagUserOwned | kFlagMaybeShared)) == 0) { - return false; - } - // If this is a user-owned buffer, it is always considered shared - if (flags_ & kFlagUserOwned) { + if (UNLIKELY(!sharedInfo())) { return true; } + if (LIKELY(!(flags() & kFlagMaybeShared))) { + return false; + } + // kFlagMaybeShared is set, so we need to check the reference count. // (Checking the reference count requires an atomic operation, which is why // we prefer to only check kFlagMaybeShared if possible.) - DCHECK(flags_ & kFlagMaybeShared); - bool shared = sharedInfo_->refcount.load(std::memory_order_acquire) > 1; + bool shared = sharedInfo()->refcount.load(std::memory_order_acquire) > 1; if (!shared) { // we're the last one left - flags_ &= ~kFlagMaybeShared; + clearFlags(kFlagMaybeShared); } return shared; } @@ -966,8 +963,7 @@ class IOBuf { * in the chain. * * Throws std::bad_alloc on error. On error the IOBuf chain will be - * unmodified. Throws std::overflow_error if the length of the entire chain - * larger than can be described by a uint32_t capacity. 
+ * unmodified. * * Returns ByteRange that points to the data IOBuf stores. */ @@ -991,17 +987,13 @@ class IOBuf { * * Throws std::bad_alloc or std::overflow_error on error. On error the IOBuf * chain will be unmodified. Throws std::overflow_error if maxLength is - * longer than the total chain length, or if the length of the coalesced - * portion of the chain is larger than can be described by a uint32_t - * capacity. (Although maxLength is uint32_t, gather() doesn't split - * buffers, so coalescing whole buffers may result in a capacity that can't - * be described in uint32_t. + * longer than the total chain length. * * Upon return, either enough of the chain was coalesced into a contiguous * region, or the entire chain was coalesced. That is, * length() >= maxLength || !isChained() is true. */ - void gather(uint32_t maxLength) { + void gather(uint64_t maxLength) { if (!isChained() || length_ >= maxLength) { return; } @@ -1110,21 +1102,13 @@ class IOBuf { IOBuf& operator=(IOBuf&& other) noexcept; private: - enum FlagsEnum : uint32_t { - kFlagUserOwned = 0x1, - kFlagFreeSharedInfo = 0x2, - kFlagMaybeShared = 0x4, - }; - - // Values for the type_ field. - // We currently don't really use this for anything, other than to have it - // around for debugging purposes. We store it at the moment just because we - // have the 4 extra bytes that would just be padding otherwise. - enum ExtBufTypeEnum { - kExtAllocated = 0, - kExtUserSupplied = 1, - kExtUserOwned = 2, - kCombinedAlloc = 3, + enum FlagsEnum : uintptr_t { + // Adding any more flags would not work on 32-bit architectures, + // as these flags are stashed in the least significant 2 bits of a + // max-align-aligned pointer. + kFlagFreeSharedInfo = 0x1, + kFlagMaybeShared = 0x2, + kFlagMask = kFlagFreeSharedInfo | kFlagMaybeShared }; struct SharedInfo { @@ -1153,10 +1137,10 @@ class IOBuf { * IOBuf. The IOBuf constructor does not automatically increment the * reference count. 
*/ - IOBuf(ExtBufTypeEnum type, uint32_t flags, - uint8_t* buf, uint32_t capacity, - uint8_t* data, uint32_t length, - SharedInfo* sharedInfo); + struct InternalConstructor {}; // avoid conflicts + IOBuf(InternalConstructor, uintptr_t flagsAndSharedInfo, + uint8_t* buf, uint64_t capacity, + uint8_t* data, uint64_t length); void unshareOneSlow(); void unshareChained(); @@ -1173,17 +1157,17 @@ class IOBuf { coalesceAndReallocate(headroom(), newLength, end, end->prev_->tailroom()); } void decrementRefcount(); - void reserveSlow(uint32_t minHeadroom, uint32_t minTailroom); + void reserveSlow(uint64_t minHeadroom, uint64_t minTailroom); void freeExtBuffer(); - static size_t goodExtBufferSize(uint32_t minCapacity); + static size_t goodExtBufferSize(uint64_t minCapacity); static void initExtBuffer(uint8_t* buf, size_t mallocSize, SharedInfo** infoReturn, - uint32_t* capacityReturn); - static void allocExtBuffer(uint32_t minCapacity, + uint64_t* capacityReturn); + static void allocExtBuffer(uint64_t minCapacity, uint8_t** bufReturn, SharedInfo** infoReturn, - uint32_t* capacityReturn); + uint64_t* capacityReturn); static void releaseStorage(HeapStorage* storage, uint16_t freeFlags); static void freeInternalBuf(void* buf, void* userData); @@ -1209,13 +1193,48 @@ class IOBuf { */ uint8_t* data_{nullptr}; uint8_t* buf_{nullptr}; - uint32_t length_{0}; - uint32_t capacity_{0}; - mutable uint32_t flags_{kFlagUserOwned}; - uint32_t type_{kExtUserOwned}; - // SharedInfo may be NULL if kFlagUserOwned is set. It is non-NULL - // in all other cases. 
- SharedInfo* sharedInfo_{nullptr}; + uint64_t length_{0}; + uint64_t capacity_{0}; + + // Pack flags in least significant 2 bits, sharedInfo in the rest + mutable uintptr_t flagsAndSharedInfo_{0}; + + static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags, + SharedInfo* info) { + uintptr_t uinfo = reinterpret_cast(info); + DCHECK_EQ(flags & ~kFlagMask, 0); + DCHECK_EQ(uinfo & kFlagMask, 0); + return flags | uinfo; + } + + inline SharedInfo* sharedInfo() const { + return reinterpret_cast(flagsAndSharedInfo_ & ~kFlagMask); + } + + inline void setSharedInfo(SharedInfo* info) { + uintptr_t uinfo = reinterpret_cast(info); + DCHECK_EQ(uinfo & kFlagMask, 0); + flagsAndSharedInfo_ = (flagsAndSharedInfo_ & kFlagMask) | uinfo; + } + + inline uintptr_t flags() const { + return flagsAndSharedInfo_ & kFlagMask; + } + + // flags_ are changed from const methods + inline void setFlags(uintptr_t flags) const { + DCHECK_EQ(flags & ~kFlagMask, 0); + flagsAndSharedInfo_ |= flags; + } + + inline void clearFlags(uintptr_t flags) const { + DCHECK_EQ(flags & ~kFlagMask, 0); + flagsAndSharedInfo_ &= ~flags; + } + + inline void setFlagsAndSharedInfo(uintptr_t flags, SharedInfo* info) { + flagsAndSharedInfo_ = packFlagsAndSharedInfo(flags, info); + } struct DeleterBase { virtual ~DeleterBase() { } @@ -1251,7 +1270,6 @@ typename std::enable_if::value, std::unique_ptr>::type IOBuf::takeOwnership(UniquePtr&& buf, size_t count) { size_t size = count * sizeof(typename UniquePtr::element_type); - DCHECK_LT(size, size_t(std::numeric_limits::max())); auto deleter = new UniquePtrDeleter(buf.get_deleter()); return takeOwnership(buf.release(), size, @@ -1260,9 +1278,9 @@ IOBuf::takeOwnership(UniquePtr&& buf, size_t count) { } inline std::unique_ptr IOBuf::copyBuffer( - const void* data, uint32_t size, uint32_t headroom, - uint32_t minTailroom) { - uint32_t capacity = headroom + size + minTailroom; + const void* data, uint64_t size, uint64_t headroom, + uint64_t minTailroom) { + uint64_t 
capacity = headroom + size + minTailroom; std::unique_ptr<IOBuf> buf = create(capacity); buf->advance(headroom); memcpy(buf->writableData(), data, size); @@ -1271,14 +1289,14 @@ inline std::unique_ptr<IOBuf> IOBuf::copyBuffer( } inline std::unique_ptr<IOBuf> IOBuf::copyBuffer(const std::string& buf, - uint32_t headroom, - uint32_t minTailroom) { + uint64_t headroom, + uint64_t minTailroom) { return copyBuffer(buf.data(), buf.size(), headroom, minTailroom); } inline std::unique_ptr<IOBuf> IOBuf::maybeCopyBuffer(const std::string& buf, - uint32_t headroom, - uint32_t minTailroom) { + uint64_t headroom, + uint64_t minTailroom) { if (buf.empty()) { return nullptr; } diff --git a/folly/io/IOBufQueue.cpp b/folly/io/IOBufQueue.cpp index 8ad8cfcf..58c1844a 100644 --- a/folly/io/IOBufQueue.cpp +++ b/folly/io/IOBufQueue.cpp @@ -29,7 +29,7 @@ namespace { using folly::IOBuf; const size_t MIN_ALLOC_SIZE = 2000; -const size_t MAX_ALLOC_SIZE = 8000; // Must fit within a uint32_t +const size_t MAX_ALLOC_SIZE = 8000; const size_t MAX_PACK_COPY = 4096; /** @@ -46,7 +46,7 @@ appendToChain(unique_ptr<IOBuf>& dst, unique_ptr<IOBuf>&& src, bool pack) { // reduce wastage (the tail's tailroom and the head's headroom) when // joining two IOBufQueues together. 
size_t copyRemaining = MAX_PACK_COPY; - uint32_t n; + uint64_t n; while (src && (n = src->length()) < copyRemaining && n < tail->tailroom()) { @@ -88,7 +88,7 @@ IOBufQueue& IOBufQueue::operator=(IOBufQueue&& other) { return *this; } -std::pair<void*, uint32_t> +std::pair<void*, uint64_t> IOBufQueue::headroom() { if (head_) { return std::make_pair(head_->writableBuffer(), head_->headroom()); @@ -98,7 +98,7 @@ IOBufQueue::headroom() { } void -IOBufQueue::markPrepended(uint32_t n) { +IOBufQueue::markPrepended(uint64_t n) { if (n == 0) { return; } @@ -108,7 +108,7 @@ } void -IOBufQueue::prepend(const void* buf, uint32_t n) { +IOBufQueue::prepend(const void* buf, uint64_t n) { auto p = headroom(); if (n > p.second) { throw std::overflow_error("Not enough room to prepend"); @@ -156,7 +156,7 @@ IOBufQueue::append(const void* buf, size_t len) { false); } IOBuf* last = head_->prev(); - uint32_t copyLen = std::min(len, (size_t)last->tailroom()); + uint64_t copyLen = std::min(len, (size_t)last->tailroom()); memcpy(last->writableTail(), src, copyLen); src += copyLen; last->append(copyLen); @@ -166,7 +166,7 @@ } void -IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) { +IOBufQueue::wrapBuffer(const void* buf, size_t len, uint64_t blockSize) { auto src = static_cast<const uint8_t*>(buf); while (len != 0) { size_t n = std::min(len, size_t(blockSize)); @@ -176,9 +176,9 @@ IOBufQueue::wrapBuffer(const void* buf, size_t len, uint32_t blockSize) { } } -pair<void*, uint32_t> -IOBufQueue::preallocateSlow(uint32_t min, uint32_t newAllocationSize, - uint32_t max) { +pair<void*, uint64_t> +IOBufQueue::preallocateSlow(uint64_t min, uint64_t newAllocationSize, + uint64_t max) { // Allocate a new buffer of the requested max size. 
unique_ptr<IOBuf> newBuf(IOBuf::create(std::max(min, newAllocationSize))); appendToChain(head_, std::move(newBuf), false); diff --git a/folly/io/IOBufQueue.h b/folly/io/IOBufQueue.h index f499492e..21dd020e 100644 --- a/folly/io/IOBufQueue.h +++ b/folly/io/IOBufQueue.h @@ -54,18 +54,18 @@ class IOBufQueue { /** * Return a space to prepend bytes and the amount of headroom available. */ - std::pair<void*, uint32_t> headroom(); + std::pair<void*, uint64_t> headroom(); /** * Indicate that n bytes from the headroom have been used. */ - void markPrepended(uint32_t n); + void markPrepended(uint64_t n); /** * Prepend an existing range; throws std::overflow_error if not enough * room. */ - void prepend(const void* buf, uint32_t n); + void prepend(const void* buf, uint64_t n); /** * Add a buffer or buffer chain to the end of this queue. The @@ -115,7 +115,7 @@ * Importantly, this method may be used to wrap buffers larger than 4GB. */ void wrapBuffer(const void* buf, size_t len, - uint32_t blockSize=(1U << 31)); // default block size: 2GB + uint64_t blockSize=(1U << 31)); // default block size: 2GB /** * Obtain a writable block of contiguous bytes at the end of this @@ -137,9 +137,9 @@ * callback, tell the application how much of the buffer they've * filled with data. */ - std::pair<void*, uint32_t> preallocate( - uint32_t min, uint32_t newAllocationSize, - uint32_t max = std::numeric_limits<uint32_t>::max()) { + std::pair<void*, uint64_t> preallocate( + uint64_t min, uint64_t newAllocationSize, + uint64_t max = std::numeric_limits<uint64_t>::max()) { auto buf = tailBuf(); if (LIKELY(buf && buf->tailroom() >= min)) { return std::make_pair(buf->writableTail(), @@ -159,7 +159,7 @@ * invoke any other non-const methods on this IOBufQueue between * the call to preallocate and the call to postallocate(). 
*/ - void postallocate(uint32_t n) { + void postallocate(uint64_t n) { head_->prev()->append(n); chainLength_ += n; } @@ -168,7 +168,7 @@ * Obtain a writable block of n contiguous bytes, allocating more space * if necessary, and mark it as used. The caller can fill it later. */ - void* allocate(uint32_t n) { + void* allocate(uint64_t n) { void* p = preallocate(n, n).first; postallocate(n); return p; @@ -271,8 +271,8 @@ IOBuf* buf = head_->prev(); return LIKELY(!buf->isSharedOne()) ? buf : nullptr; } - std::pair<void*, uint32_t> preallocateSlow( - uint32_t min, uint32_t newAllocationSize, uint32_t max); + std::pair<void*, uint64_t> preallocateSlow( + uint64_t min, uint64_t newAllocationSize, uint64_t max); static const size_t kChainLengthNotCached = (size_t)-1; /** Not copyable */