/*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* @author Jordan DeLong <delong.j@fb.com>
*/
-#ifndef FOLLY_SMALL_VECTOR_H_
-#define FOLLY_SMALL_VECTOR_H_
-#include "Portability.h"
+#pragma once
#include <stdexcept>
#include <cstdlib>
#include <boost/mpl/empty.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/count.hpp>
-#include <boost/mpl/max.hpp>
-#include "folly/Malloc.h"
-
-#if defined(__GNUC__) && defined(__x86_64__)
-# include "folly/SmallLocks.h"
-# define FB_PACKED __attribute__((packed))
-#else
-# define FB_PACKED
-#endif
-
-#if FOLLY_HAVE_MALLOC_SIZE
- extern "C" std::size_t malloc_size(const void*);
-# if !FOLLY_HAVE_MALLOC_USABLE_SIZE
-# define malloc_usable_size malloc_size
-# endif
-# ifndef malloc_usable_size
-# define malloc_usable_size malloc_size
-# endif
-#endif
+#include <folly/Assume.h>
+#include <folly/FormatTraits.h>
+#include <folly/Malloc.h>
+#include <folly/Portability.h>
+#include <folly/SmallLocks.h>
+#include <folly/Traits.h>
+#include <folly/portability/BitsFunctexcept.h>
+#include <folly/portability/Constexpr.h>
+#include <folly/portability/Malloc.h>
+#include <folly/portability/TypeTraits.h>
// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
*/
struct NoHeap;
-/*
- * Passing this policy will cause small_vector to provide lock() and
- * unlock() functions using a 1-bit spin lock in the size value.
- *
- * Note that this is intended for a fairly specialized (although
- * strangely common at facebook) use case, where you have billions of
- * vectors in memory where none of them are "hot" and most of them are
- * small. This allows you to get fine-grained locks without spending
- * a lot of memory on mutexes (the alternative of a large hashtable of
- * locks leads to extra cache misses in the lookup path).
- *
- * __x86_64__ only.
- */
-struct OneBitMutex;
-
//////////////////////////////////////////////////////////////////////
} // small_vector_policy
!FOLLY_IS_TRIVIALLY_COPYABLE(T)
>::type
moveToUninitialized(T* first, T* last, T* out) {
- auto const count = last - first;
std::size_t idx = 0;
try {
- for (; idx < count; ++first, ++idx) {
+ for (; first != last; ++first, ++idx) {
new (&out[idx]) T(std::move(*first));
}
} catch (...) {
std::memmove(out, first, (last - first) * sizeof *first);
}
+ /*
+ * Move a range to a range of uninitialized memory. Assumes the
+ * ranges don't overlap. Inserts an element at out + pos using emplaceFunc().
+ * out will contain (end - begin) + 1 elements on success and none on failure.
+ * If emplaceFunc() throws [begin, end) is unmodified.
+ */
+ template <class T, class Size, class EmplaceFunc>
+ void moveToUninitializedEmplace(
+ T* begin,
+ T* end,
+ T* out,
+ Size pos,
+ EmplaceFunc&& emplaceFunc) {
+ // Must be called first so that if it throws [begin, end) is unmodified.
+ // We have to support the strong exception guarantee for emplace_back().
+ emplaceFunc(out + pos);
+ // move old elements to the left of the new one
+ try {
+ detail::moveToUninitialized(begin, begin + pos, out);
+ } catch (...) {
+ // Roll back: destroy the emplaced element so out holds nothing.
+ out[pos].~T();
+ throw;
+ }
+ // move old elements to the right of the new one
+ try {
+ if (begin + pos < end) {
+ detail::moveToUninitialized(begin + pos, end, out + pos + 1);
+ }
+ } catch (...) {
+ // Roll back: destroy indices [0, pos] (the left-hand moves plus the
+ // emplaced element) so out holds nothing on failure.
+ for (Size i = 0; i <= pos; ++i) {
+ out[i].~T();
+ }
+ throw;
+ }
+ }
+
/*
* Move objects in memory to the right into some uninitialized
* memory, where the region overlaps. This doesn't just use
SizeType size_;
};
-#ifdef __x86_64__
- template<class SizeType, bool ShouldUseHeap>
- struct OneBitMutexImpl {
- typedef SizeType InternalSizeType;
-
- OneBitMutexImpl() { psl_.init(); }
-
- void lock() const { psl_.lock(); }
- void unlock() const { psl_.unlock(); }
- bool try_lock() const { return psl_.try_lock(); }
-
- protected:
- static bool const kShouldUseHeap = ShouldUseHeap;
-
- static constexpr std::size_t policyMaxSize() {
- return SizeType(~(SizeType(1) << kLockBit | kExternMask));
- }
-
- std::size_t doSize() const {
- return psl_.getData() & ~kExternMask;
- }
-
- std::size_t isExtern() const {
- return psl_.getData() & kExternMask;
- }
-
- void setExtern(bool b) {
- if (b) {
- setSize(SizeType(doSize()) | kExternMask);
- } else {
- setSize(SizeType(doSize()) & ~kExternMask);
- }
- }
-
- void setSize(std::size_t sz) {
- assert(sz < (std::size_t(1) << kLockBit));
- psl_.setData((kExternMask & psl_.getData()) | SizeType(sz));
- }
-
- void swapSizePolicy(OneBitMutexImpl& o) {
- std::swap(psl_, o.psl_);
- }
-
- private:
- static SizeType const kLockBit = sizeof(SizeType) * 8 - 1;
- static SizeType const kExternMask =
- kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 2)
- : 0;
-
- PicoSpinLock<SizeType,kLockBit> psl_;
- };
-#else
- template<class SizeType, bool ShouldUseHeap>
- struct OneBitMutexImpl {
- static_assert(std::is_same<SizeType,void>::value,
- "OneBitMutex only works on x86-64");
- };
-#endif
-
/*
* If you're just trying to use this class, ignore everything about
* this next small_vector_base class thing.
mpl::size<Integrals>::value == 1,
"Multiple size types specified in small_vector<>");
- /*
- * Figure out if we're supposed to supply a one-bit mutex. :)
- */
- typedef typename mpl::count<
- PolicyList,small_vector_policy::OneBitMutex
- >::type HasMutex;
-
- static_assert(HasMutex::value == 0 || HasMutex::value == 1,
- "Multiple copies of small_vector_policy::OneBitMutex "
- "supplied; this is probably a mistake");
-
/*
* Determine whether we should allow spilling to the heap or not.
*/
/*
* Make the real policy base classes.
*/
- typedef typename mpl::if_<
- HasMutex,
- OneBitMutexImpl<SizeType,!HasNoHeap::value>,
- IntegralSizePolicy<SizeType,!HasNoHeap::value>
- >::type ActualSizePolicy;
+ typedef IntegralSizePolicy<SizeType,!HasNoHeap::value>
+ ActualSizePolicy;
/*
* Now inherit from them all. This is done in such a convoluted
}
//////////////////////////////////////////////////////////////////////
-
+FOLLY_PACK_PUSH
template<class Value,
std::size_t RequestedMaxInline = 1,
class PolicyA = void,
* the user asks for less inlined elements than we can fit unioned
* into our value_type*, we will inline more than they asked.)
*/
- enum {
- MaxInline = boost::mpl::max<
- boost::mpl::int_<sizeof(Value*) / sizeof(Value)>,
- boost::mpl::int_<RequestedMaxInline>
- >::type::value
- };
+ static constexpr std::size_t MaxInline{
+ constexpr_max(sizeof(Value*) / sizeof(Value), RequestedMaxInline)};
-public:
- typedef std::size_t size_type;
+ public:
+ typedef std::size_t size_type;
typedef Value value_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- explicit small_vector() {}
+ explicit small_vector() = default;
small_vector(small_vector const& o) {
- assign(o.begin(), o.end());
+ auto n = o.size();
+ makeSize(n);
+ try {
+ std::uninitialized_copy(o.begin(), o.end(), begin());
+ } catch (...) {
+ // If element copy-construction threw after we spilled to the heap,
+ // free the heap allocation so the failed construction doesn't leak.
+ if (this->isExtern()) {
+ u.freeHeap();
+ }
+ throw;
+ }
+ this->setSize(n);
}
- small_vector(small_vector&& o) {
- *this = std::move(o);
+ small_vector(small_vector&& o)
+ noexcept(std::is_nothrow_move_constructible<Value>::value) {
+ if (o.isExtern()) {
+ // Heap-backed source: steal its buffer with a cheap pointer swap.
+ swap(o);
+ } else {
+ // Inline storage cannot be stolen; move elements one by one.
+ std::uninitialized_copy(std::make_move_iterator(o.begin()),
+ std::make_move_iterator(o.end()),
+ begin());
+ this->setSize(o.size());
+ }
}
small_vector(std::initializer_list<value_type> il) {
constructImpl(il.begin(), il.end(), std::false_type());
}
- explicit small_vector(size_type n, value_type const& t = value_type()) {
- doConstruct(n, t);
+ explicit small_vector(size_type n) {
+ doConstruct(n, [&](void* p) { new (p) value_type(); });
+ }
+
+ small_vector(size_type n, value_type const& t) {
+ doConstruct(n, [&](void* p) { new (p) value_type(t); });
}
template<class Arg>
}
small_vector& operator=(small_vector& o) {
+ // TODO: optimization:
+ // if both are internal, use move assignment where possible
+ if (this == &o) return *this;
clear();
- if (!o.isExtern()) {
- makeSize(o.size());
- for (std::size_t i = 0; i < o.size(); ++i) {
- new (data() + i) value_type(std::move(o[i]));
- }
- this->setSize(o.size());
- } else {
- swap(o);
- }
+ // We are empty after clear(); swapping takes o's contents wholesale
+ // and leaves o holding our emptied state.
+ swap(o);
return *this;
}
}
static constexpr size_type max_size() {
- return !BaseType::kShouldUseHeap ? MaxInline
+ return !BaseType::kShouldUseHeap ? static_cast<size_type>(MaxInline)
: BaseType::policyMaxSize();
}
auto thisCapacity = this->capacity();
auto oCapacity = o.capacity();
- std::swap(unpackHack(&u.pdata_.heap_), unpackHack(&o.u.pdata_.heap_));
+ auto* tmp = u.pdata_.heap_;
+ u.pdata_.heap_ = o.u.pdata_.heap_;
+ o.u.pdata_.heap_ = tmp;
this->setCapacity(oCapacity);
o.setCapacity(thisCapacity);
}
size_type i = oldSmall.size();
+ const size_type ci = i;
try {
for (; i < oldLarge.size(); ++i) {
- new (&oldSmall[i]) value_type(std::move(oldLarge[i]));
+ auto addr = oldSmall.begin() + i;
+ new (addr) value_type(std::move(oldLarge[i]));
oldLarge[i].~value_type();
}
} catch (...) {
+ oldSmall.setSize(i);
for (; i < oldLarge.size(); ++i) {
oldLarge[i].~value_type();
}
- oldLarge.setSize(oldSmall.size());
+ oldLarge.setSize(ci);
throw;
}
- this->swapSizePolicy(o);
+ oldSmall.setSize(i);
+ oldLarge.setSize(ci);
return;
}
size_type capacity() const {
if (this->isExtern()) {
if (u.hasCapacity()) {
- return *u.getCapacity();
+ return u.getCapacity();
}
return malloc_usable_size(u.pdata_.heap_) / sizeof(value_type);
}
tmp.swap(*this);
}
- template<class ...Args>
+ template <class... Args>
void emplace_back(Args&&... args) {
- // call helper function for static dispatch of special cases
- emplaceBack(std::forward<Args>(args)...);
- }
-
- void push_back(value_type&& t) {
if (capacity() == size()) {
- makeSize(std::max(size_type(2), 3 * size() / 2), &t, size());
+ // Any of args may be references into the vector.
+ // When we are reallocating, we have to be careful to construct the new
+ // element before modifying the data in the old buffer.
+ makeSize(
+ size() + 1,
+ [&](void* p) { new (p) value_type(std::forward<Args>(args)...); },
+ size());
} else {
- new (end()) value_type(std::move(t));
+ new (end()) value_type(std::forward<Args>(args)...);
}
this->setSize(size() + 1);
}
+ void push_back(value_type&& t) {
+ return emplace_back(std::move(t));
+ }
+
void push_back(value_type const& t) {
- // Make a copy and forward to the rvalue value_type&& overload
- // above.
- push_back(value_type(t));
+ emplace_back(t);
}
void pop_back() {
auto offset = p - begin();
if (capacity() == size()) {
- makeSize(size() + 1, &t, offset);
+ makeSize(
+ size() + 1,
+ [&t](void* ptr) { new (ptr) value_type(std::move(t)); },
+ offset);
this->setSize(this->size() + 1);
} else {
- makeSize(size() + 1);
detail::moveObjectsRight(data() + offset,
data() + size(),
data() + size() + 1);
}
iterator erase(const_iterator q1, const_iterator q2) {
+ if (q1 == q2) return unconst(q1);
std::move(unconst(q2), end(), unconst(q1));
- for (auto it = q1; it != end(); ++it) {
+ for (auto it = (end() - std::distance(q1, q2)); it != end(); ++it) {
it->~value_type();
}
this->setSize(size() - (q2 - q1));
reference at(size_type i) {
if (i >= size()) {
- throw std::out_of_range("index out of range");
+ std::__throw_out_of_range("index out of range");
}
return (*this)[i];
}
const_reference at(size_type i) const {
if (i >= size()) {
- throw std::out_of_range("index out of range");
+ std::__throw_out_of_range("index out of range");
}
return (*this)[i];
}
private:
- /*
- * This is doing the same like emplace_back, but we need this helper
- * to catch the special case - see the next overload function..
- */
- template<class ...Args>
- void emplaceBack(Args&&... args) {
- makeSize(size() + 1);
- new (end()) value_type(std::forward<Args>(args)...);
- this->setSize(size() + 1);
- }
-
- /*
- * Special case of emplaceBack for rvalue
- */
- void emplaceBack(value_type&& t) {
- push_back(std::move(t));
- }
-
static iterator unconst(const_iterator it) {
return const_cast<iterator>(it);
}
- /*
- * g++ doesn't allow you to bind a non-const reference to a member
- * of a packed structure, presumably because it would make it too
- * easy to accidentally make an unaligned memory access?
- */
- template<class T> static T& unpackHack(T* p) {
- return *p;
- }
-
// The std::false_type argument is part of disambiguating the
// iterator insert functions from integral types (see insert().)
template<class It>
// With iterators that only allow a single pass, we can't really
// do anything sane here.
while (first != last) {
- push_back(*first++);
+ emplace_back(*first++);
}
return;
}
auto distance = std::distance(first, last);
makeSize(distance);
this->setSize(distance);
-
- detail::populateMemForward(data(), distance,
- [&] (void* p) { new (p) value_type(*first++); }
- );
+ try {
+ detail::populateMemForward(data(), distance,
+ [&] (void* p) { new (p) value_type(*first++); }
+ );
+ } catch (...) {
+ if (this->isExtern()) {
+ u.freeHeap();
+ }
+ throw;
+ }
}
- void doConstruct(size_type n, value_type const& val) {
+ template <typename InitFunc>
+ void doConstruct(size_type n, InitFunc&& func) {
makeSize(n);
this->setSize(n);
- detail::populateMemForward(data(), n,
- [&] (void* p) { new (p) value_type(val); }
- );
+ try {
+ detail::populateMemForward(data(), n, std::forward<InitFunc>(func));
+ } catch (...) {
+ if (this->isExtern()) {
+ u.freeHeap();
+ }
+ throw;
+ }
}
// The true_type means we should forward to the size_t,value_type
// overload.
void constructImpl(size_type n, value_type const& val, std::true_type) {
- doConstruct(n, val);
+ doConstruct(n, [&](void* p) { new (p) value_type(val); });
}
- void makeSize(size_type size, value_type* v = nullptr) {
- makeSize(size, v, size - 1);
+ /*
+ * Compute the size after growth.
+ */
+ size_type computeNewSize() const {
+ return std::min((3 * capacity()) / 2 + 1, max_size());
+ }
+
+ // Grow storage (if needed) to hold newSize elements, moving existing
+ // elements over; per makeSizeInternal, does not change size().
+ void makeSize(size_type newSize) {
+ makeSizeInternal(newSize, false, [](void*) { assume_unreachable(); }, 0);
+ }
+
+ // Grow storage to hold newSize elements, emplacing a new element at
+ // index `pos` of the new buffer via emplaceFunc(). Only valid when
+ // reallocation is certain, i.e. size() == capacity().
+ template <typename EmplaceFunc>
+ void makeSize(size_type newSize, EmplaceFunc&& emplaceFunc, size_type pos) {
+ assert(size() == capacity());
+ makeSizeInternal(
+ newSize, true, std::forward<EmplaceFunc>(emplaceFunc), pos);
+ }
/*
- * Ensure we have a large enough memory region to be size `size'.
+ * Ensure we have a large enough memory region to be size `newSize'.
* Will move/copy elements if we are spilling to heap_ or needed to
* allocate a new region, but if resized in place doesn't initialize
* anything in the new region. In any case doesn't change size().
* Supports insertion of new element during reallocation by given
* pointer to new element and position of new element.
- * NOTE: If reallocation is not needed, and new element should be
- * inserted in the middle of vector (not at the end), do the move
- * objects and insertion outside the function, otherwise exception is thrown.
+ * NOTE: If reallocation is not needed, insert must be false,
+ * because we only know how to emplace elements into new memory.
*/
- void makeSize(size_type size, value_type* v, size_type pos) {
- if (size > this->max_size()) {
+ template <typename EmplaceFunc>
+ void makeSizeInternal(
+ size_type newSize,
+ bool insert,
+ EmplaceFunc&& emplaceFunc,
+ size_type pos) {
+ if (newSize > max_size()) {
throw std::length_error("max_size exceeded in small_vector");
}
- if (size <= this->capacity()) {
+ if (newSize <= capacity()) {
+ assert(!insert);
return;
}
+ newSize = std::max(newSize, computeNewSize());
- auto needBytes = size * sizeof(value_type);
+ auto needBytes = newSize * sizeof(value_type);
// If the capacity isn't explicitly stored inline, but the heap
// allocation is grown to over some threshold, we should store
// a capacity at the front of the heap allocation.
detail::shiftPointer(newh, kHeapifyCapacitySize) :
newh);
- if (v != nullptr) {
- // move new element
- try {
- new (&newp[pos]) value_type(std::move(*v));
- } catch (...) {
- free(newh);
- throw;
- }
-
- // move old elements to the left of the new one
- try {
- detail::moveToUninitialized(begin(), begin() + pos, newp);
- } catch (...) {
- newp[pos].~value_type();
- free(newh);
- throw;
- }
-
- // move old elements to the right of the new one
- try {
- if (pos < size-1) {
- detail::moveToUninitialized(begin() + pos, end(), newp + pos + 1);
- }
- } catch (...) {
- for (size_type i = 0; i <= pos; ++i) {
- newp[i].~value_type();
- }
- free(newh);
- throw;
- }
- } else {
- // move without inserting new element
- try {
+ try {
+ if (insert) {
+ // move and insert the new element
+ detail::moveToUninitializedEmplace(
+ begin(), end(), newp, pos, std::forward<EmplaceFunc>(emplaceFunc));
+ } else {
+ // move without inserting new element
detail::moveToUninitialized(begin(), end(), newp);
- } catch (...) {
- free(newh);
- throw;
}
+ } catch (...) {
+ free(newh);
+ throw;
}
for (auto& val : *this) {
val.~value_type();
assert(this->isExtern());
if (u.hasCapacity()) {
assert(newCapacity < std::numeric_limits<InternalSizeType>::max());
- *u.getCapacity() = InternalSizeType(newCapacity);
+ u.setCapacity(newCapacity);
}
}
void* heap_;
InternalSizeType capacity_;
- InternalSizeType* getCapacity() {
- return &capacity_;
+ InternalSizeType getCapacity() const {
+ return capacity_;
+ }
+ void setCapacity(InternalSizeType c) {
+ capacity_ = c;
}
- } FB_PACKED;
+ } FOLLY_PACK_ATTR;
struct HeapPtr {
// Lower order bit of heap_ is used as flag to indicate whether capacity is
// stored at the front of the heap allocation.
void* heap_;
- InternalSizeType* getCapacity() {
+ InternalSizeType getCapacity() const {
assert(detail::pointerFlagGet(heap_));
- return static_cast<InternalSizeType*>(
- detail::pointerFlagClear(heap_));
+ return *static_cast<InternalSizeType*>(detail::pointerFlagClear(heap_));
}
- } FB_PACKED;
+ void setCapacity(InternalSizeType c) {
+ *static_cast<InternalSizeType*>(detail::pointerFlagClear(heap_)) = c;
+ }
+ } FOLLY_PACK_ATTR;
-#if defined(__x86_64_)
- typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline];
+#if (FOLLY_X64 || FOLLY_PPC64)
+ typedef unsigned char InlineStorageDataType[sizeof(value_type) * MaxInline];
#else
typedef typename std::aligned_storage<
sizeof(value_type) * MaxInline,
alignof(value_type)
- >::type InlineStorageType;
+ >::type InlineStorageDataType;
#endif
+ typedef typename std::conditional<
+ sizeof(value_type) * MaxInline != 0,
+ InlineStorageDataType,
+ void*
+ >::type InlineStorageType;
+
static bool const kHasInlineCapacity =
sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType);
bool hasCapacity() const {
return kHasInlineCapacity || detail::pointerFlagGet(pdata_.heap_);
}
- InternalSizeType* getCapacity() {
+ InternalSizeType getCapacity() const {
return pdata_.getCapacity();
}
- InternalSizeType* getCapacity() const {
- return const_cast<Data*>(this)->getCapacity();
+ void setCapacity(InternalSizeType c) {
+ pdata_.setCapacity(c);
}
void freeHeap() {
auto vp = detail::pointerFlagClear(pdata_.heap_);
free(vp);
}
- } FB_PACKED u;
-} FB_PACKED;
+ } FOLLY_PACK_ATTR u;
+} FOLLY_PACK_ATTR;
+FOLLY_PACK_POP
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
-}
+namespace detail {
-#pragma GCC diagnostic pop
+// Format support.
+template <class T, size_t M, class A, class B, class C>
+struct IndexableTraits<small_vector<T, M, A, B, C>>
+ : public IndexableTraitsSeq<small_vector<T, M, A, B, C>> {
+};
-#ifdef FB_PACKED
-# undef FB_PACKED
-#endif
+} // namespace detail
-#endif
+} // namespace folly
+
+#pragma GCC diagnostic pop