X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2Fio%2FIOBuf.cpp;h=3a73baeb30d89ac17a96f57637cb033eacc7c7c7;hp=06bcca464cc1c02842bbfc40657ee40e3d7c7596;hb=a1614feea3f3c0beb75fb2dc43ec45b3e5d57223;hpb=81823a9cb036baed2a3cfe5b352832e6340e6a39

diff --git a/folly/io/IOBuf.cpp b/folly/io/IOBuf.cpp
index 06bcca46..3a73baeb 100644
--- a/folly/io/IOBuf.cpp
+++ b/folly/io/IOBuf.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2015 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,19 @@
  * limitations under the License.
  */
 
+#ifndef __STDC_LIMIT_MACROS
 #define __STDC_LIMIT_MACROS
+#endif
 
-#include "folly/io/IOBuf.h"
+#include <folly/io/IOBuf.h>
 
-#include "folly/Conv.h"
-#include "folly/Likely.h"
-#include "folly/Malloc.h"
-#include "folly/Memory.h"
-#include "folly/ScopeGuard.h"
+#include <folly/Conv.h>
+#include <folly/Likely.h>
+#include <folly/Malloc.h>
+#include <folly/Memory.h>
+#include <folly/ScopeGuard.h>
+#include <folly/SpookyHashV2.h>
+#include <folly/io/Cursor.h>
 
 #include <stdexcept>
 #include <assert.h>
@@ -110,12 +114,12 @@ struct IOBuf::HeapFullStorage {
 
   HeapStorage hs;
   SharedInfo shared;
-  MaxAlign align;
+  std::max_align_t align;
 };
 
 IOBuf::SharedInfo::SharedInfo()
-  : freeFn(NULL),
-    userData(NULL) {
+  : freeFn(nullptr),
+    userData(nullptr) {
   // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
   // no other threads should be referring to it yet.
   refcount.store(1, std::memory_order_relaxed);
@@ -141,9 +145,7 @@ void* IOBuf::operator new(size_t size) {
   return &(storage->buf);
 }
 
-void* IOBuf::operator new(size_t size, void* ptr) {
-  return ptr;
-}
+void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; }
 
 void IOBuf::operator delete(void* ptr) {
   auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
@@ -184,7 +186,7 @@ void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
   }
 }
 
-void IOBuf::freeInternalBuf(void* buf, void* userData) {
+void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
   auto* storage = static_cast<HeapStorage*>(userData);
   releaseStorage(storage, kDataInUse);
 }
@@ -201,9 +203,12 @@ IOBuf::IOBuf(CreateOp, uint64_t capacity)
   data_ = buf_;
 }
 
-IOBuf::IOBuf(CopyBufferOp op, const void* buf, uint64_t size,
-             uint64_t headroom, uint64_t minTailroom)
-  : IOBuf(CREATE, headroom + size + minTailroom) {
+IOBuf::IOBuf(CopyBufferOp /* op */,
+             const void* buf,
+             uint64_t size,
+             uint64_t headroom,
+             uint64_t minTailroom)
+    : IOBuf(CREATE, headroom + size + minTailroom) {
   advance(headroom);
   memcpy(writableData(), buf, size);
   append(size);
@@ -334,6 +339,10 @@ IOBuf::IOBuf(IOBuf&& other) noexcept {
   *this = std::move(other);
 }
 
+IOBuf::IOBuf(const IOBuf& other) {
+  other.cloneInto(*this);
+}
+
 IOBuf::IOBuf(InternalConstructor,
              uintptr_t flagsAndSharedInfo,
              uint8_t* buf,
@@ -365,6 +374,10 @@ IOBuf::~IOBuf() {
 }
 
 IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
+  if (this == &other) {
+    return *this;
+  }
+
   // If we are part of a chain, delete the rest of the chain.
   while (next_ != this) {
     // Since unlink() returns unique_ptr() and we don't store it,
@@ -407,6 +420,13 @@ IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
   return *this;
 }
 
+IOBuf& IOBuf::operator=(const IOBuf& other) {
+  if (this != &other) {
+    *this = IOBuf(other);
+  }
+  return *this;
+}
+
 bool IOBuf::empty() const {
   const IOBuf* current = this;
   do {
@@ -535,6 +555,19 @@ void IOBuf::unshareChained() {
   coalesceSlow();
 }
 
+void IOBuf::makeManagedChained() {
+  assert(isChained());
+
+  IOBuf* current = this;
+  while (true) {
+    current->makeManagedOne();
+    current = current->next_;
+    if (current == this) {
+      break;
+    }
+  }
+}
+
 void IOBuf::coalesceSlow() {
   // coalesceSlow() should only be called if we are part of a chain of multiple
   // IOBufs.  The caller should have already verified this.
@@ -584,9 +617,6 @@ void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                   IOBuf* end,
                                   size_t newTailroom) {
   uint64_t newCapacity = newLength + newHeadroom + newTailroom;
-  if (newCapacity > UINT32_MAX) {
-    throw std::overflow_error("IOBuf chain too large to coalesce");
-  }
 
   // Allocate space for the coalesced buffer.
   // We always convert to an external buffer, even if we happened to be an
@@ -695,20 +725,21 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
     return;
   }
 
-  size_t newAllocatedCapacity = goodExtBufferSize(newCapacity);
+  size_t newAllocatedCapacity = 0;
   uint8_t* newBuffer = nullptr;
   uint64_t newHeadroom = 0;
   uint64_t oldHeadroom = headroom();
 
   // If we have a buffer allocated with malloc and we just need more tailroom,
-  // try to use realloc()/rallocm() to grow the buffer in place.
+  // try to use realloc()/xallocx() to grow the buffer in place.
   SharedInfo* info = sharedInfo();
   if (info && (info->freeFn == nullptr) && length_ != 0 &&
       oldHeadroom >= minHeadroom) {
+    size_t headSlack = oldHeadroom - minHeadroom;
+    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
-      size_t headSlack = oldHeadroom - minHeadroom;
       // We assume that tailroom is more useful and more important than
-      // headroom (not least because realloc / rallocm allow us to grow the
+      // headroom (not least because realloc / xallocx allow us to grow the
       // buffer at the tail, but not at the head)  So, if we have more headroom
       // than we need, we consider that "wasted".  We arbitrarily define "too
       // much" headroom to be 25% of the capacity.
@@ -716,23 +747,11 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
       size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
       void* p = buf_;
       if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
-        // rallocm can write to its 2nd arg even if it returns
-        // ALLOCM_ERR_NOT_MOVED.  So, we pass a temporary to its 2nd arg and
-        // update newAllocatedCapacity only on success.
-        size_t allocatedSize;
-        int r = rallocm(&p, &allocatedSize, newAllocatedCapacity,
-                        0, ALLOCM_NO_MOVE);
-        if (r == ALLOCM_SUCCESS) {
+        if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
           newBuffer = static_cast<uint8_t*>(p);
           newHeadroom = oldHeadroom;
-          newAllocatedCapacity = allocatedSize;
-        } else if (r == ALLOCM_ERR_OOM) {
-          // shouldn't happen as we don't actually allocate new memory
-          // (due to ALLOCM_NO_MOVE)
-          throw std::bad_alloc();
         }
-        // if ALLOCM_ERR_NOT_MOVED, do nothing, fall back to
-        // malloc/memcpy/free
+        // if xallocx failed, do nothing, fall back to malloc/memcpy/free
       }
     }
   } else {  // Not using jemalloc
@@ -751,6 +770,7 @@ void IOBuf::reserveSlow(uint64_t minHeadroom, uint64_t minTailroom) {
   // None of the previous reallocation strategies worked (or we're using
   // an internal buffer).  malloc/copy/free.
   if (newBuffer == nullptr) {
+    newAllocatedCapacity = goodExtBufferSize(newCapacity);
     void* p = malloc(newAllocatedCapacity);
     if (UNLIKELY(p == nullptr)) {
       throw std::bad_alloc();
     }
@@ -801,7 +821,7 @@ void IOBuf::allocExtBuffer(uint64_t minCapacity,
                            uint64_t* capacityReturn) {
   size_t mallocSize = goodExtBufferSize(minCapacity);
   uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
-  if (UNLIKELY(buf == NULL)) {
+  if (UNLIKELY(buf == nullptr)) {
     throw std::bad_alloc();
   }
   initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
@@ -878,15 +898,76 @@ IOBuf::Iterator IOBuf::cend() const {
 folly::fbvector<struct iovec> IOBuf::getIov() const {
   folly::fbvector<struct iovec> iov;
   iov.reserve(countChainElements());
+  appendToIov(&iov);
+  return iov;
+}
+
+void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
   IOBuf const* p = this;
   do {
     // some code can get confused by empty iovs, so skip them
     if (p->length() > 0) {
-      iov.push_back({(void*)p->data(), p->length()});
+      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
     }
     p = p->next();
   } while (p != this);
-  return iov;
+}
+
+size_t IOBuf::fillIov(struct iovec* iov, size_t len) const {
+  IOBuf const* p = this;
+  size_t i = 0;
+  while (i < len) {
+    // some code can get confused by empty iovs, so skip them
+    if (p->length() > 0) {
+      iov[i].iov_base = const_cast<uint8_t*>(p->data());
+      iov[i].iov_len = p->length();
+      i++;
+    }
+    p = p->next();
+    if (p == this) {
+      return i;
+    }
+  }
+  return 0;
+}
+
+size_t IOBufHash::operator()(const IOBuf& buf) const {
+  folly::hash::SpookyHashV2 hasher;
+  hasher.Init(0, 0);
+  io::Cursor cursor(&buf);
+  for (;;) {
+    auto p = cursor.peek();
+    if (p.second == 0) {
+      break;
+    }
+    hasher.Update(p.first, p.second);
+    cursor.skip(p.second);
+  }
+  uint64_t h1;
+  uint64_t h2;
+  hasher.Final(&h1, &h2);
+  return h1;
+}
+
+bool IOBufEqual::operator()(const IOBuf& a, const IOBuf& b) const {
+  io::Cursor ca(&a);
+  io::Cursor cb(&b);
+  for (;;) {
+    auto pa = ca.peek();
+    auto pb = cb.peek();
+    if (pa.second == 0 && pb.second == 0) {
+      return true;
+    } else if (pa.second == 0 || pb.second == 0) {
+      return false;
+    }
+    size_t n = std::min(pa.second, pb.second);
+    DCHECK_GT(n, 0);
+    if (memcmp(pa.first, pb.first, n)) {
+      return false;
+    }
+    ca.skip(n);
+    cb.skip(n);
+  }
+}
 
 } // folly
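
Usage note (illustrative, not part of the diff): the hunks above add a copy constructor and copy-assignment operator (implemented via cloneInto(), so a copy shares the underlying buffers rather than duplicating the bytes), appendToIov()/fillIov() for exporting a chain as iovecs, and the IOBufHash/IOBufEqual functors that hash and compare the logical byte contents of a whole chain. The sketch below shows one way the new surface might be exercised; it is a minimal example, assuming folly headers and libraries matching this revision are available. IOBuf::copyBuffer() is taken from the existing IOBuf API rather than from this diff, and names such as payload, iov, and seen are purely illustrative.

// iobuf_example.cpp -- minimal sketch, assuming folly from this revision.
#include <folly/io/IOBuf.h>

#include <sys/uio.h>

#include <cassert>
#include <memory>
#include <string>
#include <unordered_set>

using folly::IOBuf;

int main() {
  // copyBuffer() allocates a fresh IOBuf and copies the bytes into it.
  std::string payload = "hello, world";
  std::unique_ptr<IOBuf> a = IOBuf::copyBuffer(payload);

  // New in this diff: IOBuf is copyable.  The copy constructor calls
  // cloneInto(), so `b` shares a's underlying buffer instead of copying it.
  IOBuf b(*a);
  IOBuf c;
  c = b;  // copy assignment, also added in this diff

  // fillIov() (added here) writes up to `len` non-empty chain elements into a
  // caller-provided iovec array and returns how many it filled, or 0 if the
  // chain has more non-empty elements than fit.
  struct iovec iov[4];
  size_t filled = b.fillIov(iov, 4);
  assert(filled == 1 && iov[0].iov_len == payload.size());

  // IOBufHash / IOBufEqual (added here) hash and compare the logical bytes of
  // an entire chain; the container usage is only to show the functors in
  // context.
  std::unordered_set<IOBuf, folly::IOBufHash, folly::IOBufEqual> seen;
  seen.insert(b);
  assert(seen.count(c) == 1);

  return 0;
}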