/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
23 #include <type_traits>
25 #include <folly/Likely.h>
26 #include <folly/Memory.h>
27 #include <folly/Portability.h>
28 #include <folly/Range.h>
29 #include <folly/io/IOBuf.h>
30 #include <folly/io/IOBufQueue.h>
31 #include <folly/lang/Bits.h>
32 #include <folly/portability/BitsFunctexcept.h>
/**
 * Cursor class for fast iteration over IOBuf chains.
 *
 * Cursor - Read-only access
 *
 * RWPrivateCursor - Read-write access, assumes private access to IOBuf chain
 * RWUnshareCursor - Read-write access, calls unshare on write (COW)
 * Appender - Write access, assumes private access to IOBuf chain
 *
 * Note that RW cursors write in the preallocated part of buffers (that is,
 * between the buffer's data() and tail()), while Appenders append to the end
 * of the buffer (between the buffer's tail() and bufferEnd()). Appenders
 * automatically adjust the buffer pointers, so you may only use one
 * Appender with a buffer chain; for this reason, Appenders assume private
 * access to the buffer (you need to call unshare() yourself if necessary).
 */
55 template <class Derived, class BufType>
57 // Make all the templated classes friends for copy constructor.
58 template <class D, typename B> friend class CursorBase;
60 explicit CursorBase(BufType* buf) : crtBuf_(buf), buffer_(buf) {
62 crtPos_ = crtBegin_ = crtBuf_->data();
63 crtEnd_ = crtBuf_->tail();
70 * This also allows constructing a CursorBase from other derived types.
71 * For instance, this allows constructing a Cursor from an RWPrivateCursor.
73 template <class OtherDerived, class OtherBuf>
74 explicit CursorBase(const CursorBase<OtherDerived, OtherBuf>& cursor)
75 : crtBuf_(cursor.crtBuf_),
76 crtBegin_(cursor.crtBegin_),
77 crtEnd_(cursor.crtEnd_),
78 crtPos_(cursor.crtPos_),
79 buffer_(cursor.buffer_) {}
82 * Reset cursor to point to a new buffer.
84 void reset(BufType* buf) {
88 crtPos_ = crtBegin_ = crtBuf_->data();
89 crtEnd_ = crtBuf_->tail();
93 const uint8_t* data() const {
98 * Return the remaining space available in the current IOBuf.
100 * May return 0 if the cursor is at the end of an IOBuf. Use peekBytes()
101 * instead if you want to avoid this. peekBytes() will advance to the next
102 * non-empty IOBuf (up to the end of the chain) if the cursor is currently
103 * pointing at the end of a buffer.
105 size_t length() const {
106 return crtEnd_ - crtPos_;
110 * Return the space available until the end of the entire IOBuf chain.
112 size_t totalLength() const {
113 if (crtBuf_ == buffer_) {
114 return crtBuf_->computeChainDataLength() - (crtPos_ - crtBegin_);
116 CursorBase end(buffer_->prev());
117 end.crtPos_ = end.crtEnd_;
122 * Return true if the cursor could advance the specified number of bytes
123 * from its current position.
124 * This is useful for applications that want to do checked reads instead of
125 * catching exceptions and is more efficient than using totalLength as it
126 * walks the minimal set of buffers in the chain to determine the result.
128 bool canAdvance(size_t amount) const {
129 const IOBuf* nextBuf = crtBuf_;
130 size_t available = length();
132 if (available >= amount) {
136 nextBuf = nextBuf->next();
137 available = nextBuf->length();
138 } while (nextBuf != buffer_);
143 * Return true if the cursor is at the end of the entire IOBuf chain.
145 bool isAtEnd() const {
146 // Check for the simple cases first.
147 if (crtPos_ != crtEnd_) {
150 if (crtBuf_ == buffer_->prev()) {
153 // We are at the end of a buffer, but it isn't the last buffer.
154 // We might still be at the end if the remaining buffers in the chain are
156 const IOBuf* buf = crtBuf_->next();;
157 while (buf != buffer_) {
158 if (buf->length() > 0) {
167 * Advances the cursor to the end of the entire IOBuf chain.
169 void advanceToEnd() {
170 crtBegin_ = buffer_->prev()->data();
171 crtPos_ = crtEnd_ = buffer_->prev()->tail();
172 if (crtBuf_ != buffer_->prev()) {
173 crtBuf_ = buffer_->prev();
174 static_cast<Derived*>(this)->advanceDone();
178 Derived& operator+=(size_t offset) {
179 Derived* p = static_cast<Derived*>(this);
183 Derived operator+(size_t offset) const {
184 Derived other(*this);
189 Derived& operator-=(size_t offset) {
190 Derived* p = static_cast<Derived*>(this);
194 Derived operator-(size_t offset) const {
195 Derived other(*this);
196 other.retreat(offset);
201 * Compare cursors for equality/inequality.
203 * Two cursors are equal if they are pointing to the same location in the
206 bool operator==(const Derived& other) const {
207 const IOBuf* crtBuf = crtBuf_;
208 auto crtPos = crtPos_;
209 // We can be pointing to the end of a buffer chunk, find first non-empty.
210 while (crtPos == crtBuf->tail() && crtBuf != buffer_->prev()) {
211 crtBuf = crtBuf->next();
212 crtPos = crtBuf->data();
215 const IOBuf* crtBufOther = other.crtBuf_;
216 auto crtPosOther = other.crtPos_;
217 // We can be pointing to the end of a buffer chunk, find first non-empty.
218 while (crtPosOther == crtBufOther->tail() &&
219 crtBufOther != other.buffer_->prev()) {
220 crtBufOther = crtBufOther->next();
221 crtPosOther = crtBufOther->data();
223 return (crtPos == crtPosOther) && (crtBuf == crtBufOther);
225 bool operator!=(const Derived& other) const {
226 return !operator==(other);
230 typename std::enable_if<std::is_arithmetic<T>::value, bool>::type tryRead(
232 if (LIKELY(crtPos_ + sizeof(T) <= crtEnd_)) {
233 val = loadUnaligned<T>(data());
234 crtPos_ += sizeof(T);
237 return pullAtMostSlow(&val, sizeof(T)) == sizeof(T);
241 bool tryReadBE(T& val) {
242 const bool result = tryRead(val);
243 val = Endian::big(val);
248 bool tryReadLE(T& val) {
249 const bool result = tryRead(val);
250 val = Endian::little(val);
256 if (LIKELY(crtPos_ + sizeof(T) <= crtEnd_)) {
257 T val = loadUnaligned<T>(data());
258 crtPos_ += sizeof(T);
261 return readSlow<T>();
267 return Endian::big(read<T>());
272 return Endian::little(read<T>());
276 * Read a fixed-length string.
278 * The std::string-based APIs should probably be avoided unless you
279 * ultimately want the data to live in an std::string. You're better off
280 * using the pull() APIs to copy into a raw buffer otherwise.
282 std::string readFixedString(size_t len) {
285 if (LIKELY(length() >= len)) {
286 str.append(reinterpret_cast<const char*>(data()), len);
289 readFixedStringSlow(&str, len);
295 * Read a string consisting of bytes until the given terminator character is
296 * seen. Raises an std::length_error if maxLength bytes have been processed
297 * before the terminator is seen.
299 * See comments in readFixedString() about when it's appropriate to use this
302 std::string readTerminatedString(
303 char termChar = '\0',
304 size_t maxLength = std::numeric_limits<size_t>::max());
307 * Read all bytes until the specified predicate returns true.
309 * The predicate will be called on each byte in turn, until it returns false
310 * or until the end of the IOBuf chain is reached.
312 * Returns the result as a string.
314 template <typename Predicate>
315 std::string readWhile(const Predicate& predicate);
318 * Read all bytes until the specified predicate returns true.
320 * This is a more generic version of readWhile() takes an arbitrary Output
321 * object, and calls Output::append() with each chunk of matching data.
323 template <typename Predicate, typename Output>
324 void readWhile(const Predicate& predicate, Output& out);
327 * Skip all bytes until the specified predicate returns true.
329 * The predicate will be called on each byte in turn, until it returns false
330 * or until the end of the IOBuf chain is reached.
332 template <typename Predicate>
333 void skipWhile(const Predicate& predicate);
335 size_t skipAtMost(size_t len) {
336 if (LIKELY(crtPos_ + len < crtEnd_)) {
340 return skipAtMostSlow(len);
343 void skip(size_t len) {
344 if (LIKELY(crtPos_ + len < crtEnd_)) {
352 * Skip bytes in the current IOBuf without advancing to the next one.
353 * Precondition: length() >= len
355 void skipNoAdvance(size_t len) {
356 DCHECK_LE(len, length());
360 size_t retreatAtMost(size_t len) {
361 if (len <= static_cast<size_t>(crtPos_ - crtBegin_)) {
365 return retreatAtMostSlow(len);
368 void retreat(size_t len) {
369 if (len <= static_cast<size_t>(crtPos_ - crtBegin_)) {
376 size_t pullAtMost(void* buf, size_t len) {
377 // Fast path: it all fits in one buffer.
378 if (LIKELY(crtPos_ + len <= crtEnd_)) {
379 memcpy(buf, data(), len);
383 return pullAtMostSlow(buf, len);
386 void pull(void* buf, size_t len) {
387 if (LIKELY(crtPos_ + len <= crtEnd_)) {
388 memcpy(buf, data(), len);
396 * Return the available data in the current buffer.
397 * If you want to gather more data from the chain into a contiguous region
398 * (for hopefully zero-copy access), use gather() before peekBytes().
400 ByteRange peekBytes() {
401 // Ensure that we're pointing to valid data
402 size_t available = length();
403 while (UNLIKELY(available == 0 && tryAdvanceBuffer())) {
404 available = length();
406 return ByteRange{data(), available};
410 * Alternate version of peekBytes() that returns a std::pair
411 * instead of a ByteRange. (This method pre-dates ByteRange.)
413 * This function will eventually be deprecated.
415 std::pair<const uint8_t*, size_t> peek() {
416 auto bytes = peekBytes();
417 return std::make_pair(bytes.data(), bytes.size());
420 void clone(std::unique_ptr<folly::IOBuf>& buf, size_t len) {
421 if (UNLIKELY(cloneAtMost(buf, len) != len)) {
422 std::__throw_out_of_range("underflow");
426 void clone(folly::IOBuf& buf, size_t len) {
427 if (UNLIKELY(cloneAtMost(buf, len) != len)) {
428 std::__throw_out_of_range("underflow");
432 size_t cloneAtMost(folly::IOBuf& buf, size_t len) {
433 // We might be at the end of buffer.
434 advanceBufferIfEmpty();
436 std::unique_ptr<folly::IOBuf> tmp;
438 for (int loopCount = 0; true; ++loopCount) {
439 // Fast path: it all fits in one buffer.
440 size_t available = length();
441 if (LIKELY(available >= len)) {
442 if (loopCount == 0) {
443 crtBuf_->cloneOneInto(buf);
444 buf.trimStart(crtPos_ - crtBegin_);
445 buf.trimEnd(buf.length() - len);
447 tmp = crtBuf_->cloneOne();
448 tmp->trimStart(crtPos_ - crtBegin_);
449 tmp->trimEnd(tmp->length() - len);
450 buf.prependChain(std::move(tmp));
454 advanceBufferIfEmpty();
458 if (loopCount == 0) {
459 crtBuf_->cloneOneInto(buf);
460 buf.trimStart(crtPos_ - crtBegin_);
462 tmp = crtBuf_->cloneOne();
463 tmp->trimStart(crtPos_ - crtBegin_);
464 buf.prependChain(std::move(tmp));
468 if (UNLIKELY(!tryAdvanceBuffer())) {
475 size_t cloneAtMost(std::unique_ptr<folly::IOBuf>& buf, size_t len) {
477 buf = std::make_unique<folly::IOBuf>();
479 return cloneAtMost(*buf, len);
483 * Return the distance between two cursors.
485 size_t operator-(const CursorBase& other) const {
486 BufType *otherBuf = other.crtBuf_;
489 if (otherBuf != crtBuf_) {
490 len += other.crtEnd_ - other.crtPos_;
492 for (otherBuf = otherBuf->next();
493 otherBuf != crtBuf_ && otherBuf != other.buffer_;
494 otherBuf = otherBuf->next()) {
495 len += otherBuf->length();
498 if (otherBuf == other.buffer_) {
499 std::__throw_out_of_range("wrap-around");
502 len += crtPos_ - crtBegin_;
504 if (crtPos_ < other.crtPos_) {
505 std::__throw_out_of_range("underflow");
508 len += crtPos_ - other.crtPos_;
515 * Return the distance from the given IOBuf to the this cursor.
517 size_t operator-(const BufType* buf) const {
520 const BufType* curBuf = buf;
521 while (curBuf != crtBuf_) {
522 len += curBuf->length();
523 curBuf = curBuf->next();
524 if (curBuf == buf || curBuf == buffer_) {
525 std::__throw_out_of_range("wrap-around");
529 len += crtPos_ - crtBegin_;
540 bool tryAdvanceBuffer() {
541 BufType* nextBuf = crtBuf_->next();
542 if (UNLIKELY(nextBuf == buffer_)) {
548 crtPos_ = crtBegin_ = crtBuf_->data();
549 crtEnd_ = crtBuf_->tail();
550 static_cast<Derived*>(this)->advanceDone();
554 bool tryRetreatBuffer() {
555 if (UNLIKELY(crtBuf_ == buffer_)) {
559 crtBuf_ = crtBuf_->prev();
560 crtBegin_ = crtBuf_->data();
561 crtPos_ = crtEnd_ = crtBuf_->tail();
562 static_cast<Derived*>(this)->advanceDone();
566 void advanceBufferIfEmpty() {
567 if (crtPos_ == crtEnd_) {
573 const uint8_t* crtBegin_{nullptr};
574 const uint8_t* crtEnd_{nullptr};
575 const uint8_t* crtPos_{nullptr};
579 FOLLY_NOINLINE T readSlow() {
581 pullSlow(&val, sizeof(T));
585 void readFixedStringSlow(std::string* str, size_t len) {
586 for (size_t available; (available = length()) < len; ) {
587 str->append(reinterpret_cast<const char*>(data()), available);
588 if (UNLIKELY(!tryAdvanceBuffer())) {
589 std::__throw_out_of_range("string underflow");
593 str->append(reinterpret_cast<const char*>(data()), len);
595 advanceBufferIfEmpty();
598 size_t pullAtMostSlow(void* buf, size_t len) {
599 uint8_t* p = reinterpret_cast<uint8_t*>(buf);
601 for (size_t available; (available = length()) < len; ) {
602 memcpy(p, data(), available);
604 if (UNLIKELY(!tryAdvanceBuffer())) {
610 memcpy(p, data(), len);
612 advanceBufferIfEmpty();
616 void pullSlow(void* buf, size_t len) {
617 if (UNLIKELY(pullAtMostSlow(buf, len) != len)) {
618 std::__throw_out_of_range("underflow");
622 size_t skipAtMostSlow(size_t len) {
624 for (size_t available; (available = length()) < len; ) {
625 skipped += available;
626 if (UNLIKELY(!tryAdvanceBuffer())) {
632 advanceBufferIfEmpty();
633 return skipped + len;
636 void skipSlow(size_t len) {
637 if (UNLIKELY(skipAtMostSlow(len) != len)) {
638 std::__throw_out_of_range("underflow");
642 size_t retreatAtMostSlow(size_t len) {
643 size_t retreated = 0;
644 for (size_t available; (available = crtPos_ - crtBegin_) < len;) {
645 retreated += available;
646 if (UNLIKELY(!tryRetreatBuffer())) {
652 return retreated + len;
655 void retreatSlow(size_t len) {
656 if (UNLIKELY(retreatAtMostSlow(len) != len)) {
657 std::__throw_out_of_range("underflow");
667 } // namespace detail
669 class Cursor : public detail::CursorBase<Cursor, const IOBuf> {
671 explicit Cursor(const IOBuf* buf)
672 : detail::CursorBase<Cursor, const IOBuf>(buf) {}
674 template <class OtherDerived, class OtherBuf>
675 explicit Cursor(const detail::CursorBase<OtherDerived, OtherBuf>& cursor)
676 : detail::CursorBase<Cursor, const IOBuf>(cursor) {}
681 template <class Derived>
685 typename std::enable_if<std::is_arithmetic<T>::value>::type
687 const uint8_t* u8 = reinterpret_cast<const uint8_t*>(&value);
688 Derived* d = static_cast<Derived*>(this);
689 d->push(u8, sizeof(T));
693 void writeBE(T value) {
694 Derived* d = static_cast<Derived*>(this);
695 d->write(Endian::big(value));
699 void writeLE(T value) {
700 Derived* d = static_cast<Derived*>(this);
701 d->write(Endian::little(value));
704 void push(const uint8_t* buf, size_t len) {
705 Derived* d = static_cast<Derived*>(this);
706 if (d->pushAtMost(buf, len) != len) {
707 std::__throw_out_of_range("overflow");
711 void push(ByteRange buf) {
712 if (this->pushAtMost(buf) != buf.size()) {
713 std::__throw_out_of_range("overflow");
717 size_t pushAtMost(ByteRange buf) {
718 Derived* d = static_cast<Derived*>(this);
719 return d->pushAtMost(buf.data(), buf.size());
723 * push len bytes of data from input cursor, data could be in an IOBuf chain.
724 * If input cursor contains less than len bytes, or this cursor has less than
725 * len bytes writable space, an out_of_range exception will be thrown.
727 void push(Cursor cursor, size_t len) {
728 if (this->pushAtMost(cursor, len) != len) {
729 std::__throw_out_of_range("overflow");
733 size_t pushAtMost(Cursor cursor, size_t len) {
736 auto currentBuffer = cursor.peekBytes();
737 const uint8_t* crtData = currentBuffer.data();
738 size_t available = currentBuffer.size();
739 if (available == 0) {
740 // end of buffer chain
743 // all data is in current buffer
744 if (available >= len) {
745 this->push(crtData, len);
747 return written + len;
750 // write the whole current IOBuf
751 this->push(crtData, available);
752 cursor.skip(available);
753 written += available;
759 } // namespace detail
761 enum class CursorAccess {
766 template <CursorAccess access>
768 : public detail::CursorBase<RWCursor<access>, IOBuf>,
769 public detail::Writable<RWCursor<access>> {
770 friend class detail::CursorBase<RWCursor<access>, IOBuf>;
772 explicit RWCursor(IOBuf* buf)
773 : detail::CursorBase<RWCursor<access>, IOBuf>(buf),
774 maybeShared_(true) {}
776 template <class OtherDerived, class OtherBuf>
777 explicit RWCursor(const detail::CursorBase<OtherDerived, OtherBuf>& cursor)
778 : detail::CursorBase<RWCursor<access>, IOBuf>(cursor),
779 maybeShared_(true) {}
781 * Gather at least n bytes contiguously into the current buffer,
782 * by coalescing subsequent buffers from the chain as necessary.
784 void gather(size_t n) {
785 // Forbid attempts to gather beyond the end of this IOBuf chain.
786 // Otherwise we could try to coalesce the head of the chain and end up
787 // accidentally freeing it, invalidating the pointer owned by external
790 // If crtBuf_ == head() then IOBuf::gather() will perform all necessary
791 // checking. We only have to perform an explicit check here when calling
792 // gather() on a non-head element.
793 if (this->crtBuf_ != this->head() && this->totalLength() < n) {
794 throw std::overflow_error("cannot gather() past the end of the chain");
796 size_t offset = this->crtPos_ - this->crtBegin_;
797 this->crtBuf_->gather(offset + n);
798 this->crtBegin_ = this->crtBuf_->data();
799 this->crtEnd_ = this->crtBuf_->tail();
800 this->crtPos_ = this->crtBegin_ + offset;
802 void gatherAtMost(size_t n) {
803 size_t size = std::min(n, this->totalLength());
804 size_t offset = this->crtPos_ - this->crtBegin_;
805 this->crtBuf_->gather(offset + size);
806 this->crtBegin_ = this->crtBuf_->data();
807 this->crtEnd_ = this->crtBuf_->tail();
808 this->crtPos_ = this->crtBegin_ + offset;
811 using detail::Writable<RWCursor<access>>::pushAtMost;
812 size_t pushAtMost(const uint8_t* buf, size_t len) {
813 // We have to explicitly check for an input length of 0.
814 // We support buf being nullptr in this case, but we need to avoid calling
815 // memcpy() with a null source pointer, since that is undefined behavior
816 // even if the length is 0.
823 // Fast path: the current buffer is big enough.
824 size_t available = this->length();
825 if (LIKELY(available >= len)) {
826 if (access == CursorAccess::UNSHARE) {
829 memcpy(writableData(), buf, len);
830 this->crtPos_ += len;
834 if (access == CursorAccess::UNSHARE) {
837 memcpy(writableData(), buf, available);
839 if (UNLIKELY(!this->tryAdvanceBuffer())) {
847 void insert(std::unique_ptr<folly::IOBuf> buf) {
848 folly::IOBuf* nextBuf;
849 if (this->crtPos_ == this->crtBegin_) {
851 nextBuf = this->crtBuf_;
852 this->crtBuf_->prependChain(std::move(buf));
854 std::unique_ptr<folly::IOBuf> remaining;
855 if (this->crtPos_ != this->crtEnd_) {
856 // Need to split current IOBuf in two.
857 remaining = this->crtBuf_->cloneOne();
858 remaining->trimStart(this->crtPos_ - this->crtBegin_);
859 nextBuf = remaining.get();
860 buf->prependChain(std::move(remaining));
863 nextBuf = this->crtBuf_->next();
865 this->crtBuf_->trimEnd(this->length());
866 this->crtBuf_->appendChain(std::move(buf));
868 // Jump past the new links
869 this->crtBuf_ = nextBuf;
870 this->crtPos_ = this->crtBegin_ = this->crtBuf_->data();
871 this->crtEnd_ = this->crtBuf_->tail();
875 uint8_t* writableData() {
876 return this->crtBuf_->writableData() + (this->crtPos_ - this->crtBegin_);
880 void maybeUnshare() {
881 if (UNLIKELY(maybeShared_)) {
882 size_t offset = this->crtPos_ - this->crtBegin_;
883 this->crtBuf_->unshareOne();
884 this->crtBegin_ = this->crtBuf_->data();
885 this->crtEnd_ = this->crtBuf_->tail();
886 this->crtPos_ = this->crtBegin_ + offset;
887 maybeShared_ = false;
898 typedef RWCursor<CursorAccess::PRIVATE> RWPrivateCursor;
899 typedef RWCursor<CursorAccess::UNSHARE> RWUnshareCursor;
902 * Append to the end of a buffer chain, growing the chain (by allocating new
903 * buffers) in increments of at least growth bytes every time. Won't grow
904 * (and push() and ensure() will throw) if growth == 0.
906 * TODO(tudorb): add a flavor of Appender that reallocates one IOBuf instead
909 class Appender : public detail::Writable<Appender> {
911 Appender(IOBuf* buf, uint64_t growth)
913 crtBuf_(buf->prev()),
917 uint8_t* writableData() {
918 return crtBuf_->writableTail();
921 size_t length() const {
922 return crtBuf_->tailroom();
926 * Mark n bytes (must be <= length()) as appended, as per the
927 * IOBuf::append() method.
929 void append(size_t n) {
934 * Ensure at least n contiguous bytes available to write.
935 * Postcondition: length() >= n.
937 void ensure(uint64_t n) {
938 if (LIKELY(length() >= n)) {
942 // Waste the rest of the current buffer and allocate a new one.
943 // Don't make it too small, either.
945 std::__throw_out_of_range("can't grow buffer chain");
948 n = std::max(n, growth_);
949 buffer_->prependChain(IOBuf::create(n));
950 crtBuf_ = buffer_->prev();
953 using detail::Writable<Appender>::pushAtMost;
954 size_t pushAtMost(const uint8_t* buf, size_t len) {
955 // We have to explicitly check for an input length of 0.
956 // We support buf being nullptr in this case, but we need to avoid calling
957 // memcpy() with a null source pointer, since that is undefined behavior
958 // even if the length is 0.
965 // Fast path: it all fits in one buffer.
966 size_t available = length();
967 if (LIKELY(available >= len)) {
968 memcpy(writableData(), buf, len);
973 memcpy(writableData(), buf, available);
976 if (UNLIKELY(!tryGrowChain())) {
985 * Append to the end of this buffer, using a printf() style
988 * Note that folly/Format.h provides nicer and more type-safe mechanisms
989 * for formatting strings, which should generally be preferred over
990 * printf-style formatting. Appender objects can be used directly as an
991 * output argument for Formatter objects. For example:
993 * Appender app(&iobuf);
994 * format("{} {}", "hello", "world")(app);
996 * However, printf-style strings are still needed when dealing with existing
997 * third-party code in some cases.
999 * This will always add a nul-terminating character after the end
1000 * of the output. However, the buffer data length will only be updated to
1001 * include the data itself. The nul terminator will be the first byte in the
1004 * This method may throw exceptions on error.
1006 void printf(FOLLY_PRINTF_FORMAT const char* fmt, ...)
1007 FOLLY_PRINTF_FORMAT_ATTR(2, 3);
1009 void vprintf(const char* fmt, va_list ap);
1012 * Calling an Appender object with a StringPiece will append the string
1013 * piece. This allows Appender objects to be used directly with
1016 void operator()(StringPiece sp) {
1017 push(ByteRange(sp));
1021 bool tryGrowChain() {
1022 assert(crtBuf_->next() == buffer_);
1027 buffer_->prependChain(IOBuf::create(growth_));
1028 crtBuf_ = buffer_->prev();
1037 class QueueAppender : public detail::Writable<QueueAppender> {
1040 * Create an Appender that writes to a IOBufQueue. When we allocate
1041 * space in the queue, we grow no more than growth bytes at once
1042 * (unless you call ensure() with a bigger value yourself).
1044 QueueAppender(IOBufQueue* queue, uint64_t growth)
1045 : queueCache_(queue), growth_(growth) {}
1047 void reset(IOBufQueue* queue, uint64_t growth) {
1048 queueCache_.reset(queue);
1052 uint8_t* writableData() {
1053 return queueCache_.writableData();
1057 return queueCache_.length();
1060 void append(size_t n) {
1061 queueCache_.append(n);
1064 // Ensure at least n contiguous; can go above growth_, throws if
1066 void ensure(size_t n) {
1073 typename std::enable_if<std::is_arithmetic<T>::value>::type write(T value) {
1075 if (length() >= sizeof(T)) {
1076 storeUnaligned(queueCache_.writableData(), value);
1077 queueCache_.appendUnsafe(sizeof(T));
1079 writeSlow<T>(value);
1083 using detail::Writable<QueueAppender>::pushAtMost;
1084 size_t pushAtMost(const uint8_t* buf, size_t len) {
1085 // Fill the current buffer
1086 const size_t copyLength = std::min(len, length());
1087 if (copyLength != 0) {
1088 memcpy(writableData(), buf, copyLength);
1089 queueCache_.appendUnsafe(copyLength);
1092 size_t remaining = len - copyLength;
1093 // Allocate more buffers as necessary
1094 while (remaining != 0) {
1095 auto p = queueCache_.queue()->preallocate(
1096 std::min(remaining, growth_), growth_, remaining);
1097 memcpy(p.first, buf, p.second);
1098 queueCache_.queue()->postallocate(p.second);
1100 remaining -= p.second;
1105 void insert(std::unique_ptr<folly::IOBuf> buf) {
1107 queueCache_.queue()->append(std::move(buf), true);
1111 void insert(const folly::IOBuf& buf) {
1112 insert(buf.clone());
1116 folly::IOBufQueue::WritableRangeCache queueCache_{nullptr};
1119 FOLLY_NOINLINE void ensureSlow(size_t n) {
1120 queueCache_.queue()->preallocate(n, growth_);
1121 queueCache_.fillCache();
1125 typename std::enable_if<std::is_arithmetic<T>::value>::type FOLLY_NOINLINE
1126 writeSlow(T value) {
1127 queueCache_.queue()->preallocate(sizeof(T), growth_);
1128 queueCache_.fillCache();
1130 storeUnaligned(queueCache_.writableData(), value);
1131 queueCache_.appendUnsafe(sizeof(T));
1136 } // namespace folly
1138 #include <folly/io/Cursor-inl.h>