X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2Fsmall_vector.h;h=67f18108b0c7b7dc37d08af0f331e57d954c6812;hp=92a5a2fc29d3b1561476273f2377e8e51fcf2370;hb=23e45679ddec0cc620ee6fedbb7891e488669bdd;hpb=e892dc54738ff07c2827775a208767b2018a6b13 diff --git a/folly/small_vector.h b/folly/small_vector.h index 92a5a2fc..67f18108 100644 --- a/folly/small_vector.h +++ b/folly/small_vector.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Facebook, Inc. + * Copyright 2017 Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,58 +20,45 @@ * * @author Jordan DeLong */ -#ifndef FOLLY_SMALL_VECTOR_H_ -#define FOLLY_SMALL_VECTOR_H_ -#include +#pragma once -#include -#include -#include #include -#include #include +#include +#include +#include +#include +#include +#include -#include -#include -#include +#include +#include #include -#include -#include #include +#include #include +#include #include -#include #include -#include -#include - -#include - -#if defined(__GNUC__) && FOLLY_X64 -# include -# define FB_PACK_ATTR FOLLY_PACK_ATTR -# define FB_PACK_PUSH FOLLY_PACK_PUSH -# define FB_PACK_POP FOLLY_PACK_POP -#else -# define FB_PACK_ATTR -# define FB_PACK_PUSH -# define FB_PACK_POP -#endif +#include +#include +#include -#if FOLLY_HAVE_MALLOC_SIZE - extern "C" std::size_t malloc_size(const void*); -# if !FOLLY_HAVE_MALLOC_USABLE_SIZE -# define malloc_usable_size malloc_size -# endif -# ifndef malloc_usable_size -# define malloc_usable_size malloc_size -# endif -#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // Ignore shadowing warnings within this file, so includers can use -Wshadow. -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wshadow" +FOLLY_PUSH_WARNING +FOLLY_GCC_DISABLE_WARNING("-Wshadow") namespace folly { @@ -89,30 +76,149 @@ struct NoHeap; ////////////////////////////////////////////////////////////////////// -} // small_vector_policy +} // namespace small_vector_policy ////////////////////////////////////////////////////////////////////// -template +template class small_vector; ////////////////////////////////////////////////////////////////////// namespace detail { +/* + * Move objects in memory to the right into some uninitialized + * memory, where the region overlaps. This doesn't just use + * std::move_backward because move_backward only works if all the + * memory is initialized to type T already. + */ +template +typename std::enable_if::type +moveObjectsRight(T* first, T* lastConstructed, T* realLast) { + if (lastConstructed == realLast) { + return; + } + + T* end = first - 1; // Past the end going backwards. + T* out = realLast - 1; + T* in = lastConstructed - 1; + try { + for (; in != end && out >= lastConstructed; --in, --out) { + new (out) T(std::move(*in)); + } + for (; in != end; --in, --out) { + *out = std::move(*in); + } + for (; out >= lastConstructed; --out) { + new (out) T(); + } + } catch (...) { + // We want to make sure the same stuff is uninitialized memory + // if we exit via an exception (this is to make sure we provide + // the basic exception safety guarantee for insert functions). + if (out < lastConstructed) { + out = lastConstructed - 1; + } + for (auto it = out + 1; it != realLast; ++it) { + it->~T(); + } + throw; + } +} + +// Specialization for trivially copyable types. The call to +// std::move_backward here will just turn into a memmove. 
(TODO: +// change to std::is_trivially_copyable when that works.) +template +typename std::enable_if::type +moveObjectsRight(T* first, T* lastConstructed, T* realLast) { + std::move_backward(first, lastConstructed, realLast); +} + +/* + * Populate a region of memory using `op' to construct elements. If + * anything throws, undo what we did. + */ +template +void populateMemForward(T* mem, std::size_t n, Function const& op) { + std::size_t idx = 0; + try { + for (size_t i = 0; i < n; ++i) { + op(&mem[idx]); + ++idx; + } + } catch (...) { + for (std::size_t i = 0; i < idx; ++i) { + mem[i].~T(); + } + throw; + } +} + +template +struct IntegralSizePolicyBase { + typedef SizeType InternalSizeType; + + IntegralSizePolicyBase() : size_(0) {} + + protected: + static constexpr std::size_t policyMaxSize() { + return SizeType(~kExternMask); + } + + std::size_t doSize() const { + return size_ & ~kExternMask; + } + + std::size_t isExtern() const { + return kExternMask & size_; + } + + void setExtern(bool b) { + if (b) { + size_ |= kExternMask; + } else { + size_ &= ~kExternMask; + } + } + + void setSize(std::size_t sz) { + assert(sz <= policyMaxSize()); + size_ = (kExternMask & size_) | SizeType(sz); + } + + void swapSizePolicy(IntegralSizePolicyBase& o) { + std::swap(size_, o.size_); + } + + protected: + static bool constexpr kShouldUseHeap = ShouldUseHeap; + + private: + static SizeType constexpr kExternMask = + kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 1) : 0; + + SizeType size_; +}; + +template +struct IntegralSizePolicy; + +template +struct IntegralSizePolicy + : public IntegralSizePolicyBase { + public: /* * Move a range to a range of uninitialized memory. Assumes the * ranges don't overlap. */ - template - typename std::enable_if< - !FOLLY_IS_TRIVIALLY_COPYABLE(T) - >::type + template + typename std::enable_if::type moveToUninitialized(T* first, T* last, T* out) { - auto const count = last - first; std::size_t idx = 0; try { - for (; idx < count; ++first, ++idx) { + for (; first != last; ++first, ++idx) { new (&out[idx]) T(std::move(*first)); } } catch (...) { @@ -129,234 +235,168 @@ namespace detail { } // Specialization for trivially copyable types. - template - typename std::enable_if< - FOLLY_IS_TRIVIALLY_COPYABLE(T) - >::type + template + typename std::enable_if::type moveToUninitialized(T* first, T* last, T* out) { std::memmove(out, first, (last - first) * sizeof *first); } /* - * Move objects in memory to the right into some uninitialized - * memory, where the region overlaps. This doesn't just use - * std::move_backward because move_backward only works if all the - * memory is initialized to type T already. + * Move a range to a range of uninitialized memory. Assumes the + * ranges don't overlap. Inserts an element at out + pos using + * emplaceFunc(). out will contain (end - begin) + 1 elements on success and + * none on failure. If emplaceFunc() throws [begin, end) is unmodified. */ - template - typename std::enable_if< - !FOLLY_IS_TRIVIALLY_COPYABLE(T) - >::type - moveObjectsRight(T* first, T* lastConstructed, T* realLast) { - if (lastConstructed == realLast) { - return; - } - - T* end = first - 1; // Past the end going backwards. - T* out = realLast - 1; - T* in = lastConstructed - 1; + template + void moveToUninitializedEmplace( + T* begin, + T* end, + T* out, + SizeType pos, + EmplaceFunc&& emplaceFunc) { + // Must be called first so that if it throws [begin, end) is unmodified. + // We have to support the strong exception guarantee for emplace_back(). 
+ emplaceFunc(out + pos); + // move old elements to the left of the new one try { - for (; in != end && out >= lastConstructed; --in, --out) { - new (out) T(std::move(*in)); - } - for (; in != end; --in, --out) { - *out = std::move(*in); - } - for (; out >= lastConstructed; --out) { - new (out) T(); - } + this->moveToUninitialized(begin, begin + pos, out); } catch (...) { - // We want to make sure the same stuff is uninitialized memory - // if we exit via an exception (this is to make sure we provide - // the basic exception safety guarantee for insert functions). - if (out < lastConstructed) { - out = lastConstructed - 1; - } - for (auto it = out + 1; it != realLast; ++it) { - it->~T(); - } + out[pos].~T(); throw; } - } - - // Specialization for trivially copyable types. The call to - // std::move_backward here will just turn into a memmove. (TODO: - // change to std::is_trivially_copyable when that works.) - template - typename std::enable_if< - FOLLY_IS_TRIVIALLY_COPYABLE(T) - >::type - moveObjectsRight(T* first, T* lastConstructed, T* realLast) { - std::move_backward(first, lastConstructed, realLast); - } - - /* - * Populate a region of memory using `op' to construct elements. If - * anything throws, undo what we did. - */ - template - void populateMemForward(T* mem, std::size_t n, Function const& op) { - std::size_t idx = 0; + // move old elements to the right of the new one try { - for (size_t i = 0; i < n; ++i) { - op(&mem[idx]); - ++idx; + if (begin + pos < end) { + this->moveToUninitialized(begin + pos, end, out + pos + 1); } } catch (...) { - for (std::size_t i = 0; i < idx; ++i) { - mem[i].~T(); + for (SizeType i = 0; i <= pos; ++i) { + out[i].~T(); } throw; } } +}; - template - struct IntegralSizePolicy { - typedef SizeType InternalSizeType; - - IntegralSizePolicy() : size_(0) {} - - protected: - static constexpr std::size_t policyMaxSize() { - return SizeType(~kExternMask); - } - - std::size_t doSize() const { - return size_ & ~kExternMask; - } - - std::size_t isExtern() const { - return kExternMask & size_; - } - - void setExtern(bool b) { - if (b) { - size_ |= kExternMask; - } else { - size_ &= ~kExternMask; - } - } - - void setSize(std::size_t sz) { - assert(sz <= policyMaxSize()); - size_ = (kExternMask & size_) | SizeType(sz); - } - - void swapSizePolicy(IntegralSizePolicy& o) { - std::swap(size_, o.size_); - } - - protected: - static bool const kShouldUseHeap = ShouldUseHeap; - - private: - static SizeType const kExternMask = - kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 1) - : 0; +template +struct IntegralSizePolicy + : public IntegralSizePolicyBase { + public: + template + void moveToUninitialized(T* /*first*/, T* /*last*/, T* /*out*/) { + assume_unreachable(); + } + template + void moveToUninitializedEmplace( + T* /* begin */, + T* /* end */, + T* /* out */, + SizeType /* pos */, + EmplaceFunc&& /* emplaceFunc */) { + assume_unreachable(); + } +}; - SizeType size_; - }; +/* + * If you're just trying to use this class, ignore everything about + * this next small_vector_base class thing. + * + * The purpose of this junk is to minimize sizeof(small_vector<>) + * and allow specifying the template parameters in whatever order is + * convenient for the user. There's a few extra steps here to try + * to keep the error messages at least semi-reasonable. + * + * Apologies for all the black magic. 
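 * (Editor's note, not part of the original header: concretely, the policy
 * list below is inspected with boost::mpl.  Any unsigned integral type
 * passed as a policy becomes the internal size type (std::size_t if none
 * is given), and the presence of small_vector_policy::NoHeap disables
 * spilling to the heap.)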
+ */ +namespace mpl = boost::mpl; +template < + class Value, + std::size_t RequestedMaxInline, + class InPolicyA, + class InPolicyB, + class InPolicyC> +struct small_vector_base { + typedef mpl::vector PolicyList; /* - * If you're just trying to use this class, ignore everything about - * this next small_vector_base class thing. - * - * The purpose of this junk is to minimize sizeof(small_vector<>) - * and allow specifying the template parameters in whatever order is - * convenient for the user. There's a few extra steps here to try - * to keep the error messages at least semi-reasonable. - * - * Apologies for all the black magic. + * Determine the size type */ - namespace mpl = boost::mpl; - template - struct small_vector_base { - typedef mpl::vector PolicyList; - - /* - * Determine the size type - */ - typedef typename mpl::filter_view< + typedef typename mpl::filter_view< PolicyList, - boost::is_integral - >::type Integrals; - typedef typename mpl::eval_if< + boost::is_integral>::type Integrals; + typedef typename mpl::eval_if< mpl::empty, mpl::identity, - mpl::front - >::type SizeType; + mpl::front>::type SizeType; - static_assert(std::is_unsigned::value, - "Size type should be an unsigned integral type"); - static_assert(mpl::size::value == 0 || - mpl::size::value == 1, - "Multiple size types specified in small_vector<>"); + static_assert( + std::is_unsigned::value, + "Size type should be an unsigned integral type"); + static_assert( + mpl::size::value == 0 || mpl::size::value == 1, + "Multiple size types specified in small_vector<>"); - /* - * Determine whether we should allow spilling to the heap or not. - */ - typedef typename mpl::count< - PolicyList,small_vector_policy::NoHeap - >::type HasNoHeap; - - static_assert(HasNoHeap::value == 0 || HasNoHeap::value == 1, - "Multiple copies of small_vector_policy::NoHeap " - "supplied; this is probably a mistake"); + /* + * Determine whether we should allow spilling to the heap or not. + */ + typedef typename mpl::count::type + HasNoHeap; - /* - * Make the real policy base classes. - */ - typedef IntegralSizePolicy - ActualSizePolicy; + static_assert( + HasNoHeap::value == 0 || HasNoHeap::value == 1, + "Multiple copies of small_vector_policy::NoHeap " + "supplied; this is probably a mistake"); - /* - * Now inherit from them all. This is done in such a convoluted - * way to make sure we get the empty base optimizaton on all these - * types to keep sizeof(small_vector<>) minimal. - */ - typedef boost::totally_ordered1< - small_vector, - ActualSizePolicy - > type; - }; + /* + * Make the real policy base classes. + */ + typedef IntegralSizePolicy ActualSizePolicy; - template - T* pointerFlagSet(T* p) { - return reinterpret_cast(reinterpret_cast(p) | 1); - } - template - bool pointerFlagGet(T* p) { - return reinterpret_cast(p) & 1; - } - template - T* pointerFlagClear(T* p) { - return reinterpret_cast( - reinterpret_cast(p) & ~uintptr_t(1)); - } - inline void* shiftPointer(void* p, size_t sizeBytes) { - return static_cast(p) + sizeBytes; - } + /* + * Now inherit from them all. This is done in such a convoluted + * way to make sure we get the empty base optimizaton on all these + * types to keep sizeof(small_vector<>) minimal. 
+ */ + typedef boost::totally_ordered1< + small_vector, + ActualSizePolicy> + type; +}; + +template +T* pointerFlagSet(T* p) { + return reinterpret_cast(reinterpret_cast(p) | 1); } +template +bool pointerFlagGet(T* p) { + return reinterpret_cast(p) & 1; +} +template +T* pointerFlagClear(T* p) { + return reinterpret_cast(reinterpret_cast(p) & ~uintptr_t(1)); +} +inline void* shiftPointer(void* p, size_t sizeBytes) { + return static_cast(p) + sizeBytes; +} +} // namespace detail ////////////////////////////////////////////////////////////////////// -FB_PACK_PUSH -template -class small_vector - : public detail::small_vector_base< - Value,RequestedMaxInline,PolicyA,PolicyB,PolicyC - >::type -{ - typedef typename detail::small_vector_base< - Value,RequestedMaxInline,PolicyA,PolicyB,PolicyC - >::type BaseType; +FOLLY_PACK_PUSH +template < + class Value, + std::size_t RequestedMaxInline = 1, + class PolicyA = void, + class PolicyB = void, + class PolicyC = void> +class small_vector : public detail::small_vector_base< + Value, + RequestedMaxInline, + PolicyA, + PolicyB, + PolicyC>::type { + typedef typename detail:: + small_vector_base:: + type BaseType; typedef typename BaseType::InternalSizeType InternalSizeType; /* @@ -364,26 +404,26 @@ class small_vector * the user asks for less inlined elements than we can fit unioned * into our value_type*, we will inline more than they asked.) */ - enum { - MaxInline = boost::mpl::max< - boost::mpl::int_, - boost::mpl::int_ - >::type::value - }; - -public: - typedef std::size_t size_type; - typedef Value value_type; - typedef value_type& reference; - typedef value_type const& const_reference; - typedef value_type* iterator; - typedef value_type const* const_iterator; - typedef std::ptrdiff_t difference_type; - - typedef std::reverse_iterator reverse_iterator; + static constexpr std::size_t MaxInline{ + constexpr_max(sizeof(Value*) / sizeof(Value), RequestedMaxInline)}; + + public: + typedef std::size_t size_type; + typedef Value value_type; + typedef value_type& reference; + typedef value_type const& const_reference; + typedef value_type* iterator; + typedef value_type* pointer; + typedef value_type const* const_iterator; + typedef std::ptrdiff_t difference_type; + + typedef std::reverse_iterator reverse_iterator; typedef std::reverse_iterator const_reverse_iterator; - explicit small_vector() {} + small_vector() = default; + // Allocator is unused here. It is taken in for compatibility with std::vector + // interface, but it will be ignored. + small_vector(const std::allocator&) {} small_vector(small_vector const& o) { auto n = o.size(); @@ -391,19 +431,23 @@ public: try { std::uninitialized_copy(o.begin(), o.end(), begin()); } catch (...) 
{ - if (this->isExtern()) u.freeHeap(); + if (this->isExtern()) { + u.freeHeap(); + } throw; } this->setSize(n); } - small_vector(small_vector&& o) { + small_vector(small_vector&& o) noexcept( + std::is_nothrow_move_constructible::value) { if (o.isExtern()) { swap(o); } else { - std::uninitialized_copy(std::make_move_iterator(o.begin()), - std::make_move_iterator(o.end()), - begin()); + std::uninitialized_copy( + std::make_move_iterator(o.begin()), + std::make_move_iterator(o.end()), + begin()); this->setSize(o.size()); } } @@ -412,12 +456,16 @@ public: constructImpl(il.begin(), il.end(), std::false_type()); } - explicit small_vector(size_type n, value_type const& t = value_type()) { - doConstruct(n, t); + explicit small_vector(size_type n) { + doConstruct(n, [&](void* p) { new (p) value_type(); }); } - template - explicit small_vector(Arg arg1, Arg arg2) { + small_vector(size_type n, value_type const& t) { + doConstruct(n, [&](void* p) { new (p) value_type(t); }); + } + + template + explicit small_vector(Arg arg1, Arg arg2) { // Forward using std::is_arithmetic to get to the proper // implementation; this disambiguates between the iterators and // (size_t, value_type) meaning for this constructor. @@ -441,7 +489,9 @@ public: small_vector& operator=(small_vector&& o) { // TODO: optimization: // if both are internal, use move assignment where possible - if (this == &o) return *this; + if (this == &o) { + return *this; + } clear(); swap(o); return *this; @@ -456,22 +506,42 @@ public: } static constexpr size_type max_size() { - return !BaseType::kShouldUseHeap ? MaxInline + return !BaseType::kShouldUseHeap ? static_cast(MaxInline) : BaseType::policyMaxSize(); } - size_type size() const { return this->doSize(); } - bool empty() const { return !size(); } + size_type size() const { + return this->doSize(); + } + bool empty() const { + return !size(); + } - iterator begin() { return data(); } - iterator end() { return data() + size(); } - const_iterator begin() const { return data(); } - const_iterator end() const { return data() + size(); } - const_iterator cbegin() const { return begin(); } - const_iterator cend() const { return end(); } + iterator begin() { + return data(); + } + iterator end() { + return data() + size(); + } + const_iterator begin() const { + return data(); + } + const_iterator end() const { + return data() + size(); + } + const_iterator cbegin() const { + return begin(); + } + const_iterator cend() const { + return end(); + } - reverse_iterator rbegin() { return reverse_iterator(end()); } - reverse_iterator rend() { return reverse_iterator(begin()); } + reverse_iterator rbegin() { + return reverse_iterator(end()); + } + reverse_iterator rend() { + return reverse_iterator(begin()); + } const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); @@ -481,8 +551,12 @@ public: return const_reverse_iterator(begin()); } - const_reverse_iterator crbegin() const { return rbegin(); } - const_reverse_iterator crend() const { return rend(); } + const_reverse_iterator crbegin() const { + return rbegin(); + } + const_reverse_iterator crend() const { + return rend(); + } /* * Usually one of the simplest functions in a Container-like class @@ -501,7 +575,9 @@ public: auto thisCapacity = this->capacity(); auto oCapacity = o.capacity(); - std::swap(unpackHack(&u.pdata_.heap_), unpackHack(&o.u.pdata_.heap_)); + auto* tmp = u.pdata_.heap_; + u.pdata_.heap_ = o.u.pdata_.heap_; + o.u.pdata_.heap_ = tmp; this->setCapacity(oCapacity); o.setCapacity(thisCapacity); @@ -543,7 +619,7 
@@ public: auto& oldIntern = o.isExtern() ? *this : o; auto oldExternCapacity = oldExtern.capacity(); - auto oldExternHeap = oldExtern.u.pdata_.heap_; + auto oldExternHeap = oldExtern.u.pdata_.heap_; auto buff = oldExtern.u.buffer(); size_type i = 0; @@ -575,9 +651,8 @@ public: return; } makeSize(sz); - detail::populateMemForward(begin() + size(), sz - size(), - [&] (void* p) { new (p) value_type(); } - ); + detail::populateMemForward( + begin() + size(), sz - size(), [&](void* p) { new (p) value_type(); }); this->setSize(sz); } @@ -587,9 +662,8 @@ public: return; } makeSize(sz); - detail::populateMemForward(begin() + size(), sz - size(), - [&] (void* p) { new (p) value_type(v); } - ); + detail::populateMemForward( + begin() + size(), sz - size(), [&](void* p) { new (p) value_type(v); }); this->setSize(sz); } @@ -601,7 +675,7 @@ public: return this->isExtern() ? u.heap() : u.buffer(); } - template + template iterator emplace(const_iterator p, Args&&... args) { if (p == cend()) { emplace_back(std::forward(args)...); @@ -634,7 +708,7 @@ public: size_type capacity() const { if (this->isExtern()) { if (u.hasCapacity()) { - return *u.getCapacity(); + return u.getCapacity(); } return malloc_usable_size(u.pdata_.heap_) / sizeof(value_type); } @@ -650,25 +724,28 @@ public: tmp.swap(*this); } - template + template void emplace_back(Args&&... args) { - // call helper function for static dispatch of special cases - emplaceBack(std::forward(args)...); - } - - void push_back(value_type&& t) { if (capacity() == size()) { - makeSize(std::max(size_type(2), 3 * size() / 2), &t, size()); + // Any of args may be references into the vector. + // When we are reallocating, we have to be careful to construct the new + // element before modifying the data in the old buffer. + makeSize( + size() + 1, + [&](void* p) { new (p) value_type(std::forward(args)...); }, + size()); } else { - new (end()) value_type(std::move(t)); + new (end()) value_type(std::forward(args)...); } this->setSize(size() + 1); } + void push_back(value_type&& t) { + return emplace_back(std::move(t)); + } + void push_back(value_type const& t) { - // Make a copy and forward to the rvalue value_type&& overload - // above. 
- push_back(value_type(t)); + emplace_back(t); } void pop_back() { @@ -686,18 +763,18 @@ public: auto offset = p - begin(); if (capacity() == size()) { - makeSize(size() + 1, &t, offset); + makeSize( + size() + 1, + [&t](void* ptr) { new (ptr) value_type(std::move(t)); }, + offset); this->setSize(this->size() + 1); } else { - makeSize(size() + 1); - detail::moveObjectsRight(data() + offset, - data() + size(), - data() + size() + 1); + detail::moveObjectsRight( + data() + offset, data() + size(), data() + size() + 1); this->setSize(size() + 1); data()[offset] = std::move(t); } return begin() + offset; - } iterator insert(const_iterator p, value_type const& t) { @@ -709,15 +786,14 @@ public: iterator insert(const_iterator pos, size_type n, value_type const& val) { auto offset = pos - begin(); makeSize(size() + n); - detail::moveObjectsRight(data() + offset, - data() + size(), - data() + size() + n); + detail::moveObjectsRight( + data() + offset, data() + size(), data() + size() + n); this->setSize(size() + n); std::generate_n(begin() + offset, n, [&] { return val; }); return begin() + offset; } - template + template iterator insert(const_iterator p, Arg arg1, Arg arg2) { // Forward using std::is_arithmetic to get to the proper // implementation; this disambiguates between the iterators and @@ -737,7 +813,9 @@ public: } iterator erase(const_iterator q1, const_iterator q2) { - if (q1 == q2) return unconst(q1); + if (q1 == q2) { + return unconst(q1); + } std::move(unconst(q2), end(), unconst(q1)); for (auto it = (end() - std::distance(q1, q2)); it != end(); ++it) { it->~value_type(); @@ -750,7 +828,7 @@ public: erase(begin(), end()); } - template + template void assign(Arg first, Arg last) { clear(); insert(end(), first, last); @@ -765,10 +843,22 @@ public: insert(end(), n, t); } - reference front() { assert(!empty()); return *begin(); } - reference back() { assert(!empty()); return *(end() - 1); } - const_reference front() const { assert(!empty()); return *begin(); } - const_reference back() const { assert(!empty()); return *(end() - 1); } + reference front() { + assert(!empty()); + return *begin(); + } + reference back() { + assert(!empty()); + return *(end() - 1); + } + const_reference front() const { + assert(!empty()); + return *begin(); + } + const_reference back() const { + assert(!empty()); + return *(end() - 1); + } reference operator[](size_type i) { assert(i < size()); @@ -782,57 +872,29 @@ public: reference at(size_type i) { if (i >= size()) { - throw std::out_of_range("index out of range"); + std::__throw_out_of_range("index out of range"); } return (*this)[i]; } const_reference at(size_type i) const { if (i >= size()) { - throw std::out_of_range("index out of range"); + std::__throw_out_of_range("index out of range"); } return (*this)[i]; } -private: - - /* - * This is doing the same like emplace_back, but we need this helper - * to catch the special case - see the next overload function.. - */ - template - void emplaceBack(Args&&... args) { - makeSize(size() + 1); - new (end()) value_type(std::forward(args)...); - this->setSize(size() + 1); - } - - /* - * Special case of emplaceBack for rvalue - */ - void emplaceBack(value_type&& t) { - push_back(std::move(t)); - } - + private: static iterator unconst(const_iterator it) { return const_cast(it); } - /* - * g++ doesn't allow you to bind a non-const reference to a member - * of a packed structure, presumably because it would make it too - * easy to accidentally make an unaligned memory access? 
- */ - template static T& unpackHack(T* p) { - return *p; - } - // The std::false_type argument is part of disambiguating the // iterator insert functions from integral types (see insert().) - template + template iterator insertImpl(iterator pos, It first, It last, std::false_type) { typedef typename std::iterator_traits::iterator_category categ; - if (std::is_same::value) { + if (std::is_same::value) { auto offset = pos - begin(); while (first != last) { pos = insert(pos, *first++); @@ -844,16 +906,15 @@ private: auto distance = std::distance(first, last); auto offset = pos - begin(); makeSize(size() + distance); - detail::moveObjectsRight(data() + offset, - data() + size(), - data() + size() + distance); + detail::moveObjectsRight( + data() + offset, data() + size(), data() + size() + distance); this->setSize(size() + distance); std::copy_n(first, distance, begin() + offset); return begin() + offset; } - iterator insertImpl(iterator pos, size_type n, const value_type& val, - std::true_type) { + iterator + insertImpl(iterator pos, size_type n, const value_type& val, std::true_type) { // The true_type means this should call the size_t,value_type // overload. (See insert().) return insert(pos, n, val); @@ -862,14 +923,14 @@ private: // The std::false_type argument came from std::is_arithmetic as part // of disambiguating an overload (see the comment in the // constructor). - template + template void constructImpl(It first, It last, std::false_type) { typedef typename std::iterator_traits::iterator_category categ; - if (std::is_same::value) { + if (std::is_same::value) { // With iterators that only allow a single pass, we can't really // do anything sane here. while (first != last) { - push_back(*first++); + emplace_back(*first++); } return; } @@ -877,55 +938,95 @@ private: auto distance = std::distance(first, last); makeSize(distance); this->setSize(distance); - - detail::populateMemForward(data(), distance, - [&] (void* p) { new (p) value_type(*first++); } - ); + try { + detail::populateMemForward( + data(), distance, [&](void* p) { new (p) value_type(*first++); }); + } catch (...) { + if (this->isExtern()) { + u.freeHeap(); + } + throw; + } } - void doConstruct(size_type n, value_type const& val) { + template + void doConstruct(size_type n, InitFunc&& func) { makeSize(n); this->setSize(n); - detail::populateMemForward(data(), n, - [&] (void* p) { new (p) value_type(val); } - ); + try { + detail::populateMemForward(data(), n, std::forward(func)); + } catch (...) { + if (this->isExtern()) { + u.freeHeap(); + } + throw; + } } // The true_type means we should forward to the size_t,value_type // overload. void constructImpl(size_type n, value_type const& val, std::true_type) { - doConstruct(n, val); + doConstruct(n, [&](void* p) { new (p) value_type(val); }); + } + + /* + * Compute the size after growth. + */ + size_type computeNewSize() const { + return std::min((3 * capacity()) / 2 + 1, max_size()); + } + + void makeSize(size_type newSize) { + makeSizeInternal(newSize, false, [](void*) { assume_unreachable(); }, 0); } - void makeSize(size_type size, value_type* v = nullptr) { - makeSize(size, v, size - 1); + template + void makeSize(size_type newSize, EmplaceFunc&& emplaceFunc, size_type pos) { + assert(size() == capacity()); + makeSizeInternal( + newSize, true, std::forward(emplaceFunc), pos); } /* - * Ensure we have a large enough memory region to be size `size'. + * Ensure we have a large enough memory region to be size `newSize'. 
* Will move/copy elements if we are spilling to heap_ or needed to * allocate a new region, but if resized in place doesn't initialize * anything in the new region. In any case doesn't change size(). * Supports insertion of new element during reallocation by given * pointer to new element and position of new element. - * NOTE: If reallocation is not needed, and new element should be - * inserted in the middle of vector (not at the end), do the move - * objects and insertion outside the function, otherwise exception is thrown. + * NOTE: If reallocation is not needed, insert must be false, + * because we only know how to emplace elements into new memory. */ - void makeSize(size_type size, value_type* v, size_type pos) { - if (size > this->max_size()) { + template + void makeSizeInternal( + size_type newSize, + bool insert, + EmplaceFunc&& emplaceFunc, + size_type pos) { + if (newSize > max_size()) { throw std::length_error("max_size exceeded in small_vector"); } - if (size <= this->capacity()) { + if (newSize <= capacity()) { + assert(!insert); return; } - auto needBytes = size * sizeof(value_type); + assert(this->kShouldUseHeap); + // This branch isn't needed for correctness, but allows the optimizer to + // skip generating code for the rest of this function in NoHeap + // small_vectors. + if (!this->kShouldUseHeap) { + return; + } + + newSize = std::max(newSize, computeNewSize()); + + auto needBytes = newSize * sizeof(value_type); // If the capacity isn't explicitly stored inline, but the heap // allocation is grown to over some threshold, we should store // a capacity at the front of the heap allocation. bool heapifyCapacity = - !kHasInlineCapacity && needBytes > kHeapifyCapacityThreshold; + !kHasInlineCapacity && needBytes > kHeapifyCapacityThreshold; if (heapifyCapacity) { needBytes += kHeapifyCapacitySize; } @@ -936,48 +1037,21 @@ private: assert(!detail::pointerFlagGet(newh)); value_type* newp = static_cast( - heapifyCapacity ? - detail::shiftPointer(newh, kHeapifyCapacitySize) : - newh); - - if (v != nullptr) { - // move new element - try { - new (&newp[pos]) value_type(std::move(*v)); - } catch (...) { - free(newh); - throw; - } - - // move old elements to the left of the new one - try { - detail::moveToUninitialized(begin(), begin() + pos, newp); - } catch (...) { - newp[pos].~value_type(); - free(newh); - throw; - } + heapifyCapacity ? detail::shiftPointer(newh, kHeapifyCapacitySize) + : newh); - // move old elements to the right of the new one - try { - if (pos < size-1) { - detail::moveToUninitialized(begin() + pos, end(), newp + pos + 1); - } - } catch (...) { - for (size_type i = 0; i <= pos; ++i) { - newp[i].~value_type(); - } - free(newh); - throw; - } - } else { - // move without inserting new element - try { - detail::moveToUninitialized(begin(), end(), newp); - } catch (...) { - free(newh); - throw; + try { + if (insert) { + // move and insert the new element + this->moveToUninitializedEmplace( + begin(), end(), newp, pos, std::forward(emplaceFunc)); + } else { + // move without inserting new element + this->moveToUninitialized(begin(), end(), newp); } + } catch (...) 
{ + free(newh); + throw; } for (auto& val : *this) { val.~value_type(); @@ -1005,62 +1079,70 @@ private: assert(this->isExtern()); if (u.hasCapacity()) { assert(newCapacity < std::numeric_limits::max()); - *u.getCapacity() = InternalSizeType(newCapacity); + u.setCapacity(newCapacity); } } -private: + private: struct HeapPtrWithCapacity { void* heap_; InternalSizeType capacity_; - InternalSizeType* getCapacity() { - return &capacity_; + InternalSizeType getCapacity() const { + return capacity_; } - } FB_PACK_ATTR; + void setCapacity(InternalSizeType c) { + capacity_ = c; + } + } FOLLY_PACK_ATTR; struct HeapPtr { // Lower order bit of heap_ is used as flag to indicate whether capacity is // stored at the front of the heap allocation. void* heap_; - InternalSizeType* getCapacity() { + InternalSizeType getCapacity() const { assert(detail::pointerFlagGet(heap_)); - return static_cast( - detail::pointerFlagClear(heap_)); + return *static_cast(detail::pointerFlagClear(heap_)); + } + void setCapacity(InternalSizeType c) { + *static_cast(detail::pointerFlagClear(heap_)) = c; } - } FB_PACK_ATTR; + } FOLLY_PACK_ATTR; -#if FOLLY_X64 - typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline]; +#if (FOLLY_X64 || FOLLY_PPC64) + typedef unsigned char InlineStorageDataType[sizeof(value_type) * MaxInline]; #else typedef typename std::aligned_storage< - sizeof(value_type) * MaxInline, - alignof(value_type) - >::type InlineStorageType; + sizeof(value_type) * MaxInline, + alignof(value_type)>::type InlineStorageDataType; #endif - static bool const kHasInlineCapacity = - sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType); + typedef typename std::conditional< + sizeof(value_type) * MaxInline != 0, + InlineStorageDataType, + void*>::type InlineStorageType; + + static bool constexpr kHasInlineCapacity = + sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType); // This value should we multiple of word size. - static size_t const kHeapifyCapacitySize = sizeof( - typename std::aligned_storage< - sizeof(InternalSizeType), - alignof(value_type) - >::type); + static size_t constexpr kHeapifyCapacitySize = sizeof( + typename std:: + aligned_storage::type); + // Threshold to control capacity heapifying. 
- static size_t const kHeapifyCapacityThreshold = - 100 * kHeapifyCapacitySize; + static size_t constexpr kHeapifyCapacityThreshold = + 100 * kHeapifyCapacitySize; - typedef typename std::conditional< - kHasInlineCapacity, - HeapPtrWithCapacity, - HeapPtr - >::type PointerType; + typedef typename std:: + conditional::type + PointerType; union Data { - explicit Data() { pdata_.heap_ = 0; } + explicit Data() { + pdata_.heap_ = nullptr; + } PointerType pdata_; InlineStorageType storage_; @@ -1075,10 +1157,10 @@ private: value_type* heap() noexcept { if (kHasInlineCapacity || !detail::pointerFlagGet(pdata_.heap_)) { return static_cast(pdata_.heap_); + } else { + return static_cast(detail::shiftPointer( + detail::pointerFlagClear(pdata_.heap_), kHeapifyCapacitySize)); } - return static_cast( - detail::shiftPointer( - detail::pointerFlagClear(pdata_.heap_), kHeapifyCapacitySize)); } value_type const* heap() const noexcept { return const_cast(this)->heap(); @@ -1087,41 +1169,43 @@ private: bool hasCapacity() const { return kHasInlineCapacity || detail::pointerFlagGet(pdata_.heap_); } - InternalSizeType* getCapacity() { + InternalSizeType getCapacity() const { return pdata_.getCapacity(); } - InternalSizeType* getCapacity() const { - return const_cast(this)->getCapacity(); + void setCapacity(InternalSizeType c) { + pdata_.setCapacity(c); } void freeHeap() { auto vp = detail::pointerFlagClear(pdata_.heap_); free(vp); } - } FB_PACK_ATTR u; -} FB_PACK_ATTR; -FB_PACK_POP + } FOLLY_PACK_ATTR u; +} FOLLY_PACK_ATTR; +FOLLY_PACK_POP ////////////////////////////////////////////////////////////////////// // Basic guarantee only, or provides the nothrow guarantee iff T has a // nothrow move or copy constructor. -template -void swap(small_vector& a, - small_vector& b) { +template +void swap( + small_vector& a, + small_vector& b) { a.swap(b); } ////////////////////////////////////////////////////////////////////// -} +namespace detail { -#pragma GCC diagnostic pop +// Format support. +template +struct IndexableTraits> + : public IndexableTraitsSeq> {}; -#ifdef FB_PACK_ATTR -# undef FB_PACK_ATTR -# undef FB_PACK_PUSH -# undef FB_PACK_POP -#endif +} // namespace detail -#endif +} // namespace folly + +FOLLY_POP_WARNING
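
A short usage sketch for readers of this diff. It is illustrative only and not part of the patch; it relies solely on the public interface visible above (push_back, size, capacity, max_size, and the small_vector_policy::NoHeap policy).

#include <folly/small_vector.h>

#include <cassert>

int main() {
  // Up to 4 ints are stored inside the object itself; the 5th push_back
  // spills the elements to a heap allocation.
  folly::small_vector<int, 4> vec;
  for (int i = 0; i < 5; ++i) {
    vec.push_back(i);
  }
  assert(vec.size() == 5);
  assert(vec.capacity() >= 5);

  // With the NoHeap policy the vector can never spill, so max_size() is
  // simply the inline capacity (MaxInline == 8 here).
  folly::small_vector<int, 8, folly::small_vector_policy::NoHeap> fixed;
  assert(fixed.max_size() == 8);
  return 0;
}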
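
IntegralSizePolicyBase above packs the "storage is on the heap" flag into the most significant bit of the internal size field. The following minimal standalone sketch (my illustration, not folly code; the fixed uint32_t and the PackedSize name stand in for the templated SizeType) shows that packing.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct PackedSize {
  // High bit marks "elements live on the heap"; the remaining bits
  // hold the element count, mirroring kExternMask above.
  static constexpr uint32_t kExternMask = uint32_t(1) << 31;

  std::size_t size() const { return size_ & ~kExternMask; }
  bool isExtern() const { return (size_ & kExternMask) != 0; }
  void setExtern(bool b) {
    size_ = b ? (size_ | kExternMask) : (size_ & ~kExternMask);
  }
  void setSize(std::size_t sz) {
    assert(sz <= ~kExternMask);  // i.e. policyMaxSize()
    size_ = (size_ & kExternMask) | uint32_t(sz);
  }

 private:
  uint32_t size_ = 0;
};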
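
When the capacity is "heapified" (stored at the front of the heap allocation), the header records that fact in the low bit of the heap pointer, which is always zero for malloc-aligned memory. Below is a standalone sketch of that tagging scheme (illustrative, not folly code; the tagSet/tagGet/tagClear/skipBytes names are mine and correspond to detail::pointerFlagSet/Get/Clear and detail::shiftPointer above).

#include <cstddef>
#include <cstdint>

// Set, test, and clear bit 0 of a pointer; valid because malloc'd blocks
// are at least word-aligned, so real addresses never use that bit.
inline void* tagSet(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) | 1u);
}
inline bool tagGet(void* p) {
  return (reinterpret_cast<uintptr_t>(p) & 1u) != 0;
}
inline void* tagClear(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
}

// Skip past a leading capacity field of n bytes before the element data.
inline void* skipBytes(void* p, std::size_t n) {
  return static_cast<char*>(p) + n;
}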