/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define __STDC_LIMIT_MACROS

#include "folly/io/IOBuf.h"

#include "folly/Malloc.h"
#include "folly/Likely.h"

#include <glog/logging.h>

#include <stdexcept>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

using std::unique_ptr;

namespace folly {
const uint32_t IOBuf::kMaxIOBufSize;
// Note: Applying offsetof() to an IOBuf is legal according to C++11, since
// IOBuf is a standard-layout class. However, this isn't legal with earlier
// C++ standards, which require that offsetof() only be used with POD types.
//
// This code compiles with g++ 4.6, but not with g++ 4.4 or earlier versions.
const uint32_t IOBuf::kMaxInternalDataSize =
  kMaxIOBufSize - offsetof(folly::IOBuf, int_.buf);
IOBuf::SharedInfo::SharedInfo()
  : freeFn(NULL),
    userData(NULL) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
  : freeFn(fn),
    userData(arg) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}
void* IOBuf::operator new(size_t size) {
  // Since IOBuf::create() manually allocates space for some IOBuf objects
  // using malloc(), override operator new so that all IOBuf objects are
  // always allocated using malloc(). This way operator delete can always know
  // that free() is the correct way to deallocate the memory.
  void* ptr = malloc(size);

  // operator new is not allowed to return NULL
  if (UNLIKELY(ptr == NULL)) {
    throw std::bad_alloc();
  }

  return ptr;
}

void* IOBuf::operator new(size_t size, void* ptr) {
  assert(size <= kMaxIOBufSize);
  return ptr;
}

void IOBuf::operator delete(void* ptr) {
  // For small buffers, IOBuf::create() manually allocates the space for the
  // IOBuf object using malloc(). Therefore we override delete to ensure that
  // the IOBuf space is freed using free() rather than a normal delete.
  free(ptr);
}
unique_ptr<IOBuf> IOBuf::create(uint32_t capacity) {
  // If the desired capacity is at most kMaxInternalDataSize,
  // just allocate a single region large enough for both the IOBuf header and
  // the data.
  if (capacity <= kMaxInternalDataSize) {
    void* buf = malloc(kMaxIOBufSize);
    if (UNLIKELY(buf == NULL)) {
      throw std::bad_alloc();
    }

    uint8_t* bufEnd = static_cast<uint8_t*>(buf) + kMaxIOBufSize;
    unique_ptr<IOBuf> iobuf(new(buf) IOBuf(bufEnd));
    assert(iobuf->capacity() >= capacity);
    return iobuf;
  }

  // Allocate an external buffer
  uint8_t* buf;
  SharedInfo* sharedInfo;
  uint32_t actualCapacity;
  allocExtBuffer(capacity, &buf, &sharedInfo, &actualCapacity);

  // Allocate the IOBuf header
  return unique_ptr<IOBuf>(new IOBuf(kExtAllocated, 0,
                                     buf, actualCapacity,
                                     buf, 0,
                                     sharedInfo));
}
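
// Usage sketch (illustrative; assumes the writableTail()/append() helpers
// declared in folly/io/IOBuf.h). create() picks the internal or external
// layout automatically, so callers only ever see a unique_ptr:
//
//   std::unique_ptr<folly::IOBuf> buf = folly::IOBuf::create(64);
//   memcpy(buf->writableTail(), "hello", 5);  // write into the tailroom
//   buf->append(5);                           // advance the tail pointer
//   assert(buf->length() == 5);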
unique_ptr<IOBuf> IOBuf::takeOwnership(void* buf, uint32_t capacity,
                                       uint32_t length,
                                       FreeFunction freeFn,
                                       void* userData,
                                       bool freeOnError) {
  SharedInfo* sharedInfo = NULL;
  try {
    sharedInfo = new SharedInfo(freeFn, userData);

    uint8_t* bufPtr = static_cast<uint8_t*>(buf);
    return unique_ptr<IOBuf>(new IOBuf(kExtUserSupplied, kFlagFreeSharedInfo,
                                       bufPtr, capacity,
                                       bufPtr, length,
                                       sharedInfo));
  } catch (...) {
    delete sharedInfo;
    if (freeOnError) {
      if (freeFn) {
        try {
          freeFn(buf, userData);
        } catch (...) {
          // The user's free function is not allowed to throw.
          abort();
        }
      } else {
        free(buf);
      }
    }
    throw;
  }
}
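
// Usage sketch (illustrative; assumes the overloads/default arguments declared
// in folly/io/IOBuf.h). With no FreeFunction the buffer is released with
// free(), so plain malloc()'d memory can be handed off directly; a custom
// free function (which must not throw) can be supplied for anything else:
//
//   void* raw = malloc(4096);
//   auto owned = folly::IOBuf::takeOwnership(raw, 4096, 4096);
//   // 'owned' frees the buffer (via free() or the supplied FreeFunction)
//   // when the last IOBuf referencing it goes away.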
unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, uint32_t capacity) {
  // We cast away the const-ness of the buffer here.
  // This is okay since IOBuf users must use unshare() to create a copy of
  // this buffer before writing to it.
  uint8_t* bufPtr = static_cast<uint8_t*>(const_cast<void*>(buf));
  return unique_ptr<IOBuf>(new IOBuf(kExtUserSupplied, kFlagUserOwned,
                                     bufPtr, capacity,
                                     bufPtr, capacity,
                                     NULL));
}
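
// Usage sketch (illustrative): wrapBuffer() never takes ownership, so it is
// suitable for memory the caller keeps alive, such as a string literal or a
// stack array. Writing requires unshare() first, as the comment above notes:
//
//   static const char kGreeting[] = "hello";
//   auto wrapped = folly::IOBuf::wrapBuffer(kGreeting, sizeof(kGreeting) - 1);
//   // Writing through wrapped->writableData() would be unsafe here; call
//   // wrapped->unshare() to copy into an owned buffer before modifying bytes.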
IOBuf::IOBuf(uint8_t* end)
  : next_(this),
    prev_(this),
    data_(int_.buf),
    length_(0),
    flags_(0) {
  assert(end - int_.buf == kMaxInternalDataSize);
  assert(end - reinterpret_cast<uint8_t*>(this) == kMaxIOBufSize);
}

IOBuf::IOBuf(ExtBufTypeEnum type,
             uint32_t flags,
             uint8_t* buf,
             uint32_t capacity,
             uint8_t* data,
             uint32_t length,
             SharedInfo* sharedInfo)
  : next_(this),
    prev_(this),
    data_(data),
    length_(length),
    flags_(kFlagExt | flags) {
  ext_.capacity = capacity;
  ext_.type = type;
  ext_.buf = buf;
  ext_.sharedInfo = sharedInfo;

  assert(data >= buf);
  assert(data + length <= buf + capacity);
  assert(static_cast<bool>(flags & kFlagUserOwned) ==
         (sharedInfo == NULL));
}
IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  if (flags_ & kFlagExt) {
    decrementRefcount();
  }
}

bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}
uint32_t IOBuf::countChainElements() const {
  uint32_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

uint64_t IOBuf::computeChainDataLength() const {
  uint64_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}
void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail.
  otherTail->next_ = this;
  prev_ = otherTail;
}
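
// Usage sketch (illustrative): prependChain() splices another chain in
// directly before *this; called on the head of a chain, that places the new
// element at the tail. The chain accounting functions above then see it:
//
//   auto head = folly::IOBuf::create(1024);
//   head->prependChain(folly::IOBuf::create(2048));  // now a 2-element ring
//   assert(head->countChainElements() == 2);
//   assert(head->computeChainDataLength() == 0);     // no bytes appended yet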
unique_ptr<IOBuf> IOBuf::clone() const {
  unique_ptr<IOBuf> newHead(cloneOne());

  for (IOBuf* current = next_; current != this; current = current->next_) {
    newHead->prependChain(current->cloneOne());
  }

  return newHead;
}

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  if (flags_ & kFlagExt) {
    unique_ptr<IOBuf> iobuf(new IOBuf(static_cast<ExtBufTypeEnum>(ext_.type),
                                      flags_, ext_.buf, ext_.capacity,
                                      data_, length_,
                                      ext_.sharedInfo));
    if (ext_.sharedInfo) {
      ext_.sharedInfo->refcount.fetch_add(1, std::memory_order_acq_rel);
    }
    return iobuf;
  } else {
    // We have an internal data buffer that cannot be shared.
    // Allocate a new IOBuf and copy the data into it.
    unique_ptr<IOBuf> iobuf(IOBuf::create(kMaxInternalDataSize));
    assert((iobuf->flags_ & kFlagExt) == 0);
    iobuf->data_ += headroom();
    memcpy(iobuf->data_, data_, length_);
    iobuf->length_ = length_;
    return iobuf;
  }
}
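
// Usage sketch (illustrative; assumes the requested size exceeds
// kMaxInternalDataSize so an external buffer is used): clone() is a shallow
// copy for external buffers. It bumps the SharedInfo refcount and shares the
// underlying bytes, so both handles report the data as shared until one of
// them calls unshare():
//
//   auto original = folly::IOBuf::create(8192);
//   auto copy = original->clone();
//   assert(original->isShared() && copy->isShared());
//   copy->unshare();                    // forces a private copy of the data
//   assert(!copy->isShared());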
void IOBuf::unshareOneSlow() {
  // Internal buffers are always unshared, so unshareOneSlow() can only be
  // called for external buffers
  assert(flags_ & kFlagExt);

  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  uint32_t actualCapacity;
  allocExtBuffer(ext_.capacity, &buf, &sharedInfo, &actualCapacity);

  // Copy the data.
  // Maintain the same amount of headroom. Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  uint32_t headlen = headroom();
  memcpy(buf + headlen, data_, length_);

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure kFlagExt is set, and kFlagUserOwned and kFlagFreeSharedInfo
  // are not set.
  flags_ = kFlagExt;

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  ext_.buf = buf;
  ext_.sharedInfo = sharedInfo;
}
void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs. The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare. Let coalesceSlow() do the work.
  coalesceSlow();
}
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  assert(isChained());
  assert(length_ < maxLength);

  // Compute the length of the entire chain
  uint64_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (newLength < maxLength && end != this);

  uint64_t newHeadroom = headroom();
  uint64_t newTailroom = end->prev_->tailroom();
  coalesceAndReallocate(newHeadroom, newLength, end, newTailroom);
  // We should be the only element left in the chain now
  assert(length_ >= maxLength || !isChained());
}
void IOBuf::coalesceAndReallocate(size_t newHeadroom,
                                  size_t newLength,
                                  IOBuf* end,
                                  size_t newTailroom) {
  uint64_t newCapacity = newLength + newHeadroom + newTailroom;
  if (newCapacity > UINT32_MAX) {
    throw std::overflow_error("IOBuf chain too large to coalesce");
  }

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  uint32_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    assert(current->length_ <= remaining);
    remaining -= current->length_;
    memcpy(p, current->data_, current->length_);
    p += current->length_;
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  if (flags_ & kFlagExt) {
    decrementRefcount();
  }

  // Make sure kFlagExt is set, and kFlagUserOwned and kFlagFreeSharedInfo
  // are not set.
  flags_ = kFlagExt;

  ext_.capacity = actualCapacity;
  ext_.type = kExtAllocated;
  ext_.buf = newBuf;
  ext_.sharedInfo = newInfo;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}
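
// Usage sketch (illustrative; assumes the coalesce() wrapper declared in
// folly/io/IOBuf.h, which dispatches to coalesceSlow() only when the chain has
// more than one element). Coalescing flattens a chain into a single
// contiguous buffer for code that cannot iterate a chain:
//
//   auto head = folly::IOBuf::create(512);
//   head->prependChain(folly::IOBuf::create(512));
//   head->coalesce();                  // one contiguous, unshared buffer
//   assert(!head->isChained());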
void IOBuf::decrementRefcount() {
  assert(flags_ & kFlagExt);

  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  if (flags_ & kFlagUserOwned) {
    assert(ext_.sharedInfo == NULL);
    return;
  }

  // Decrement the refcount
  uint32_t newcnt = ext_.sharedInfo->refcount.fetch_sub(
      1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.
  if (newcnt > 1) {
    return;
  }

  // We were the last user. Free the buffer
  if (ext_.sharedInfo->freeFn != NULL) {
    try {
      ext_.sharedInfo->freeFn(ext_.buf, ext_.sharedInfo->userData);
    } catch (...) {
      // The user's free function should never throw. Otherwise we might
      // throw from the IOBuf destructor. Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.
      abort();
    }
  } else {
    free(ext_.buf);
  }

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object. (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.) However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags_ & kFlagFreeSharedInfo) {
    delete ext_.sharedInfo;
  }
}
void IOBuf::reserveSlow(uint32_t minHeadroom, uint32_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  CHECK_LT(newCapacity, UINT32_MAX);

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = goodExtBufferSize(newCapacity);
  uint8_t* newBuffer = nullptr;
  uint32_t newHeadroom = 0;
  uint32_t oldHeadroom = headroom();

  if ((flags_ & kFlagExt) && length_ != 0 && oldHeadroom >= minHeadroom) {
    if (usingJEMalloc()) {
      size_t headSlack = oldHeadroom - minHeadroom;
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / rallocm allow us to grow the
      // buffer at the tail, but not at the head). So, if we have more headroom
      // than we need, we consider that "wasted". We arbitrarily define "too
      // much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = ext_.buf;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          int r = rallocm(&p, &newAllocatedCapacity, newAllocatedCapacity,
                          0, ALLOCM_NO_MOVE);
          if (r == ALLOCM_SUCCESS) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          } else if (r == ALLOCM_ERR_OOM) {
            // shouldn't happen as we don't actually allocate new memory
            // (due to ALLOCM_NO_MOVE)
            throw std::bad_alloc();
          }
          // if ALLOCM_ERR_NOT_MOVED, do nothing, fall back to
          // malloc/memcpy/free
        }
      }
    } else {  // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(ext_.buf, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer). malloc/copy/free.
  if (newBuffer == nullptr) {
    void* p = malloc(newAllocatedCapacity);
    if (UNLIKELY(p == nullptr)) {
      throw std::bad_alloc();
    }
    newBuffer = static_cast<uint8_t*>(p);
    memcpy(newBuffer + minHeadroom, data_, length_);
    if (flags_ & kFlagExt) {
      free(ext_.buf);
    }
    newHeadroom = minHeadroom;
  }

  SharedInfo* info;
  uint32_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  flags_ = kFlagExt;

  ext_.capacity = cap;
  ext_.type = kExtAllocated;
  ext_.buf = newBuffer;
  ext_.sharedInfo = info;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}
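
// Usage sketch (illustrative; assumes the reserve()/prepend() helpers declared
// in folly/io/IOBuf.h, where reserve() only falls back to reserveSlow() when
// the existing headroom or tailroom is insufficient). Reserving headroom up
// front lets a protocol layer prepend a header later without reallocating:
//
//   auto buf = folly::IOBuf::create(4096);
//   buf->reserve(16, 0);              // ensure at least 16 bytes of headroom
//   // ... fill in the payload ...
//   buf->prepend(16);                 // move data_ back to claim the header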
void IOBuf::allocExtBuffer(uint32_t minCapacity,
                           uint8_t** bufReturn,
                           SharedInfo** infoReturn,
                           uint32_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(malloc(mallocSize));
  if (UNLIKELY(buf == NULL)) {
    throw std::bad_alloc();
  }

  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
  *bufReturn = buf;
}
size_t IOBuf::goodExtBufferSize(uint32_t minCapacity) {
  // Determine how much space we should allocate. We'll store the SharedInfo
  // for the external buffer just after the buffer itself. (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(ext_.buf) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}
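
// Worked example (illustrative; the concrete numbers depend on the platform's
// sizeof(SharedInfo) and on the allocator behind goodMallocSize()): for
// minCapacity = 100 and a hypothetical 24-byte SharedInfo, minSize is 124,
// which rounds up to 128 for 8-byte alignment; goodMallocSize() may then round
// further up to the allocator's bucket size, and everything beyond the
// trailing SharedInfo becomes usable capacity in initExtBuffer().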
void IOBuf::initExtBuffer(uint8_t* buf, size_t mallocSize,
                          SharedInfo** infoReturn,
                          uint32_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new(infoStart) SharedInfo;

  size_t actualCapacity = infoStart - buf;
  // In the unlikely event that the actual capacity is larger than can
  // fit in a uint32_t after adding room for the refcount and calling
  // goodMallocSize(), truncate downwards if necessary.
  if (actualCapacity >= UINT32_MAX) {
    *capacityReturn = UINT32_MAX;
  } else {
    *capacityReturn = actualCapacity;
  }

  *infoReturn = sharedInfo;
}
fbstring IOBuf::moveToFbString() {
  // Externally allocated buffers (malloc) are just fine; everything else needs
  // to be turned into one.
  if (flags_ != kFlagExt ||  // not malloc()-ed
      headroom() != 0 ||     // malloc()-ed block doesn't start at beginning
      tailroom() == 0 ||     // no room for NUL terminator
      isShared() ||          // shared
      isChained()) {         // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(reinterpret_cast<char*>(writableData()),
               length(), capacity(),
               AcquireMallocatedString());

  // Reset to internal buffer.
  flags_ = 0;
  clear();

  return str;
}
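
// Usage sketch (illustrative): moveToFbString() hands the malloc()'d buffer to
// an fbstring with no copy when the fast-path conditions above already hold;
// otherwise the coalesceAndReallocate() call first makes them hold:
//
//   auto buf = folly::IOBuf::create(4096);
//   // ... append data ...
//   folly::fbstring s = buf->moveToFbString();  // buf is left empty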
IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}
folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  IOBuf const* p = this;
  do {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov.push_back({(void*)p->data(), p->length()});