/*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
/*
- * For high-level documentation and usage examples see folly/doc/small_vector.md
+ * For high-level documentation and usage examples see
+ * folly/docs/small_vector.md
*
* @author Jordan DeLong <delong.j@fb.com>
*/
-#ifndef FOLLY_SMALL_VECTOR_H_
-#define FOLLY_SMALL_VECTOR_H_
-#include "Portability.h"
+#pragma once
#include <stdexcept>
#include <cstdlib>
#include <boost/mpl/empty.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/count.hpp>
-#include <boost/mpl/max.hpp>
-#include "folly/Malloc.h"
+#include <folly/FormatTraits.h>
+#include <folly/Malloc.h>
+#include <folly/Portability.h>
+#include <folly/SmallLocks.h>
+#include <folly/Traits.h>
+#include <folly/portability/BitsFunctexcept.h>
+#include <folly/portability/Constexpr.h>
+#include <folly/portability/Malloc.h>
+#include <folly/portability/TypeTraits.h>
-#if defined(__GNUC__) && defined(__x86_64__)
-# include "folly/SmallLocks.h"
-# define FB_PACKED __attribute__((packed))
-#else
-# define FB_PACKED
-#endif
-
-#ifdef FOLLY_HAVE_MALLOC_SIZE
- extern "C" std::size_t malloc_size(const void*);
-# ifndef FOLLY_HAVE_MALLOC_USABLE_SIZE
-# define malloc_usable_size malloc_size
-# endif
-# ifndef malloc_usable_size
-# define malloc_usable_size malloc_size
-# endif
-#endif
+// Ignore shadowing warnings within this file, so includers can use -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
namespace folly {
*/
struct NoHeap;
-/*
- * Passing this policy will cause small_vector to provide lock() and
- * unlock() functions using a 1-bit spin lock in the size value.
- *
- * Note that this is intended for a fairly specialized (although
- * strangely common at facebook) use case, where you have billions of
- * vectors in memory where none of them are "hot" and most of them are
- * small. This allows you to get fine-grained locks without spending
- * a lot of memory on mutexes (the alternative of a large hashtable of
- * locks leads to extra cache misses in the lookup path).
- *
- * __x86_64__ only.
- */
-struct OneBitMutex;
-
//////////////////////////////////////////////////////////////////////
} // small_vector_policy
//////////////////////////////////////////////////////////////////////
template<class T, std::size_t M, class A, class B, class C>
-struct small_vector;
+class small_vector;
//////////////////////////////////////////////////////////////////////
*/
template<class T>
typename std::enable_if<
- !boost::has_trivial_copy<T>::value
+ !FOLLY_IS_TRIVIALLY_COPYABLE(T)
>::type
moveToUninitialized(T* first, T* last, T* out) {
- auto const count = last - first;
std::size_t idx = 0;
try {
- for (; idx < count; ++first, ++idx) {
+ for (; first != last; ++first, ++idx) {
new (&out[idx]) T(std::move(*first));
}
} catch (...) {
}
}
- // Specialization for trivially copyable types. (TODO: change to
- // std::is_trivially_copyable when that works.)
+ // Specialization for trivially copyable types.
template<class T>
typename std::enable_if<
- boost::has_trivial_copy<T>::value
+ FOLLY_IS_TRIVIALLY_COPYABLE(T)
>::type
moveToUninitialized(T* first, T* last, T* out) {
std::memmove(out, first, (last - first) * sizeof *first);
*/
template<class T>
typename std::enable_if<
- !boost::has_trivial_copy<T>::value
+ !FOLLY_IS_TRIVIALLY_COPYABLE(T)
>::type
moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
if (lastConstructed == realLast) {
// change to std::is_trivially_copyable when that works.)
template<class T>
typename std::enable_if<
- boost::has_trivial_copy<T>::value
+ FOLLY_IS_TRIVIALLY_COPYABLE(T)
>::type
moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
std::move_backward(first, lastConstructed, realLast);
void populateMemForward(T* mem, std::size_t n, Function const& op) {
std::size_t idx = 0;
try {
- for (int i = 0; i < n; ++i) {
+ for (size_t i = 0; i < n; ++i) {
op(&mem[idx]);
++idx;
}
IntegralSizePolicy() : size_(0) {}
protected:
- std::size_t policyMaxSize() const {
+ static constexpr std::size_t policyMaxSize() {
return SizeType(~kExternMask);
}
SizeType size_;
};
-#ifdef __x86_64__
- template<class SizeType, bool ShouldUseHeap>
- struct OneBitMutexImpl {
- typedef SizeType InternalSizeType;
-
- OneBitMutexImpl() { psl_.init(); }
-
- void lock() const { psl_.lock(); }
- void unlock() const { psl_.unlock(); }
- bool try_lock() const { return psl_.try_lock(); }
-
- protected:
- static bool const kShouldUseHeap = ShouldUseHeap;
-
- std::size_t policyMaxSize() const {
- return SizeType(~(SizeType(1) << kLockBit | kExternMask));
- }
-
- std::size_t doSize() const {
- return psl_.getData() & ~kExternMask;
- }
-
- std::size_t isExtern() const {
- return psl_.getData() & kExternMask;
- }
-
- void setExtern(bool b) {
- if (b) {
- setSize(SizeType(doSize()) | kExternMask);
- } else {
- setSize(SizeType(doSize()) & ~kExternMask);
- }
- }
-
- void setSize(std::size_t sz) {
- assert(sz < (std::size_t(1) << kLockBit));
- psl_.setData((kExternMask & psl_.getData()) | SizeType(sz));
- }
-
- void swapSizePolicy(OneBitMutexImpl& o) {
- std::swap(psl_, o.psl_);
- }
-
- private:
- static SizeType const kLockBit = sizeof(SizeType) * 8 - 1;
- static SizeType const kExternMask =
- kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 2)
- : 0;
-
- PicoSpinLock<SizeType,kLockBit> psl_;
- };
-#else
- template<class SizeType, bool ShouldUseHeap>
- struct OneBitMutexImpl {
- static_assert(std::is_same<SizeType,void>::value,
- "OneBitMutex only works on x86-64");
- };
-#endif
-
/*
* If you're just trying to use this class, ignore everything about
* this next small_vector_base class thing.
mpl::size<Integrals>::value == 1,
"Multiple size types specified in small_vector<>");
- /*
- * Figure out if we're supposed to supply a one-bit mutex. :)
- */
- typedef typename mpl::count<
- PolicyList,small_vector_policy::OneBitMutex
- >::type HasMutex;
-
- static_assert(HasMutex::value == 0 || HasMutex::value == 1,
- "Multiple copies of small_vector_policy::OneBitMutex "
- "supplied; this is probably a mistake");
-
/*
* Determine whether we should allow spilling to the heap or not.
*/
/*
* Make the real policy base classes.
*/
- typedef typename mpl::if_<
- HasMutex,
- OneBitMutexImpl<SizeType,!HasNoHeap::value>,
- IntegralSizePolicy<SizeType,!HasNoHeap::value>
- >::type ActualSizePolicy;
+ typedef IntegralSizePolicy<SizeType,!HasNoHeap::value>
+ ActualSizePolicy;
/*
* Now inherit from them all. This is done in such a convoluted
}
template <class T>
T* pointerFlagClear(T* p) {
- return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) & ~1);
+ return reinterpret_cast<T*>(
+ reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
}
inline void* shiftPointer(void* p, size_t sizeBytes) {
return static_cast<char*>(p) + sizeBytes;
}
//////////////////////////////////////////////////////////////////////
-
+FOLLY_PACK_PUSH
template<class Value,
std::size_t RequestedMaxInline = 1,
class PolicyA = void,
* the user asks for less inlined elements than we can fit unioned
* into our value_type*, we will inline more than they asked.)
*/
- enum {
- MaxInline = boost::mpl::max<
- boost::mpl::int_<sizeof(Value*) / sizeof(Value)>,
- boost::mpl::int_<RequestedMaxInline>
- >::type::value
- };
+ static constexpr std::size_t MaxInline{
+ constexpr_max(sizeof(Value*) / sizeof(Value), RequestedMaxInline)};
-public:
- typedef std::size_t size_type;
+ public:
+ typedef std::size_t size_type;
typedef Value value_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- explicit small_vector() {}
+ explicit small_vector() = default;
small_vector(small_vector const& o) {
- assign(o.begin(), o.end());
+ // Size the storage once up front, then copy-construct the elements;
+ // if an element copy constructor throws, release any heap block
+ // before rethrowing so nothing leaks.
+ auto n = o.size();
+ makeSize(n);
+ try {
+ std::uninitialized_copy(o.begin(), o.end(), begin());
+ } catch (...) {
+ if (this->isExtern()) {
+ u.freeHeap();
+ }
+ throw;
+ }
+ this->setSize(n);
}
- small_vector(small_vector&& o) {
- *this = std::move(o);
+ // Move construction: steal the heap buffer via swap when o is
+ // external; otherwise move each inline element into place.
+ // noexcept exactly when Value's move constructor is noexcept.
+ small_vector(small_vector&& o)
+ noexcept(std::is_nothrow_move_constructible<Value>::value) {
+ if (o.isExtern()) {
+ swap(o);
+ } else {
+ std::uninitialized_copy(std::make_move_iterator(o.begin()),
+ std::make_move_iterator(o.end()),
+ begin());
+ this->setSize(o.size());
+ }
}
small_vector(std::initializer_list<value_type> il) {
constructImpl(il.begin(), il.end(), std::false_type());
}
- explicit small_vector(size_type n, value_type const& t = value_type()) {
- doConstruct(n, t);
+ // Splitting the old defaulted-argument constructor in two lets
+ // small_vector(n) value-initialize elements directly, so Value no
+ // longer needs to be copy-constructible for size-only construction.
+ explicit small_vector(size_type n) {
+ doConstruct(n, [&](void* p) { new (p) value_type(); });
+ }
+
+ small_vector(size_type n, value_type const& t) {
+ doConstruct(n, [&](void* p) { new (p) value_type(t); });
}
template<class Arg>
}
small_vector& operator=(small_vector&& o) {
+ // TODO: optimization:
+ // if both are internal, use move assignment where possible
+ // Guard self-move, then discard our elements and take o's via swap.
+ if (this == &o) return *this;
clear();
- if (!o.isExtern()) {
- makeSize(o.size());
- for (std::size_t i = 0; i < o.size(); ++i) {
- new (data() + i) value_type(std::move(o[i]));
- }
- this->setSize(o.size());
- } else {
- swap(o);
- }
+ swap(o);
return *this;
}
return std::lexicographical_compare(begin(), end(), o.begin(), o.end());
}
- size_type max_size() const {
- return !BaseType::kShouldUseHeap ? MaxInline
- : this->policyMaxSize();
+ // Now static constexpr: the result depends only on the policy and
+ // MaxInline, not on any instance state.
+ static constexpr size_type max_size() {
+ return !BaseType::kShouldUseHeap ? static_cast<size_type>(MaxInline)
+ : BaseType::policyMaxSize();
}
size_type size() const { return this->doSize(); }
}
size_type i = oldSmall.size();
+ // Remember the original small size: on success oldLarge must end up
+ // with exactly ci elements, and on failure both size fields must
+ // match the elements actually left alive.
+ const size_type ci = i;
try {
for (; i < oldLarge.size(); ++i) {
- new (&oldSmall[i]) value_type(std::move(oldLarge[i]));
+ auto addr = oldSmall.begin() + i;
+ new (addr) value_type(std::move(oldLarge[i]));
oldLarge[i].~value_type();
}
} catch (...) {
+ // A move threw: record how many elements oldSmall now owns,
+ // destroy the stragglers still in oldLarge, then shrink it to ci.
+ oldSmall.setSize(i);
for (; i < oldLarge.size(); ++i) {
oldLarge[i].~value_type();
}
- oldLarge.setSize(oldSmall.size());
+ oldLarge.setSize(ci);
throw;
}
- this->swapSizePolicy(o);
+ oldSmall.setSize(i);
+ oldLarge.setSize(ci);
return;
}
emplaceBack(std::forward<Args>(args)...);
}
+ // Non-template emplace_back overloads: forward plain lvalue/rvalue
+ // arguments straight to push_back instead of the variadic
+ // emplaceBack path.
+ void emplace_back(const value_type& t) {
+ push_back(t);
+ }
+ void emplace_back(value_type& t) {
+ push_back(t);
+ }
+
+ void emplace_back(value_type&& t) {
+ push_back(std::move(t));
+ }
+
void push_back(value_type&& t) {
if (capacity() == size()) {
makeSize(std::max(size_type(2), 3 * size() / 2), &t, size());
}
void push_back(value_type const& t) {
- // Make a copy and forward to the rvalue value_type&& overload
- // above.
- push_back(value_type(t));
+ // TODO: we'd like to make use of makeSize (it can be optimized better,
+ // because it manipulates the internals)
+ // unfortunately the current implementation only supports moving from
+ // a supplied rvalue, and doing an extra move just to reuse it is a perf
+ // net loss
+ // NOTE(review): the tmp copy presumably guards against t aliasing an
+ // element of *this across the reallocation; the commented-out
+ // isInside(&t) hints the copy could be skipped for references from
+ // outside our storage -- confirm before tightening.
+ if (size() == capacity()) {// && isInside(&t)) {
+ value_type tmp(t);
+ emplaceBack(std::move(tmp));
+ } else {
+ emplaceBack(t);
+ }
}
void pop_back() {
}
iterator erase(const_iterator q1, const_iterator q2) {
+ if (q1 == q2) return unconst(q1);
+ // After compacting with std::move, only the trailing (q2 - q1)
+ // slots hold moved-from objects; the old loop wrongly destroyed
+ // everything from q1 onward, including live moved-to elements.
std::move(unconst(q2), end(), unconst(q1));
- for (auto it = q1; it != end(); ++it) {
+ for (auto it = (end() - std::distance(q1, q2)); it != end(); ++it) {
it->~value_type();
}
this->setSize(size() - (q2 - q1));
reference at(size_type i) {
if (i >= size()) {
- throw std::out_of_range();
+ // std::__throw_out_of_range (folly/portability/BitsFunctexcept.h,
+ // included above) also builds with -fno-exceptions; the replaced
+ // expression was ill-formed anyway, since std::out_of_range has
+ // no default constructor.
+ std::__throw_out_of_range("index out of range");
}
return (*this)[i];
}
const_reference at(size_type i) const {
if (i >= size()) {
- throw std::out_of_range();
+ // Same portability shim as the non-const overload.
+ std::__throw_out_of_range("index out of range");
}
return (*this)[i];
}
this->setSize(size() + 1);
}
- /*
- * Special case of emplaceBack for rvalue
- */
- void emplaceBack(value_type&& t) {
- push_back(std::move(t));
- }
-
static iterator unconst(const_iterator it) {
return const_cast<iterator>(it);
}
// With iterators that only allow a single pass, we can't really
// do anything sane here.
while (first != last) {
- push_back(*first++);
+ emplace_back(*first++);
}
return;
}
auto distance = std::distance(first, last);
makeSize(distance);
this->setSize(distance);
-
- detail::populateMemForward(data(), distance,
- [&] (void* p) { new (p) value_type(*first++); }
- );
+ try {
+ detail::populateMemForward(data(), distance,
+ [&] (void* p) { new (p) value_type(*first++); }
+ );
+ } catch (...) {
+ if (this->isExtern()) {
+ u.freeHeap();
+ }
+ throw;
+ }
}
- void doConstruct(size_type n, value_type const& val) {
+ // Generalized to take an initializer functor so callers choose how
+ // each slot is constructed (value-init vs. copy of a prototype);
+ // frees the heap block before rethrowing if an initializer throws.
+ template <typename InitFunc>
+ void doConstruct(size_type n, InitFunc&& func) {
makeSize(n);
this->setSize(n);
- detail::populateMemForward(data(), n,
- [&] (void* p) { new (p) value_type(val); }
- );
+ try {
+ detail::populateMemForward(data(), n, std::forward<InitFunc>(func));
+ } catch (...) {
+ if (this->isExtern()) {
+ u.freeHeap();
+ }
+ throw;
+ }
}
// The true_type means we should forward to the size_t,value_type
// overload.
void constructImpl(size_type n, value_type const& val, std::true_type) {
- doConstruct(n, val);
+ doConstruct(n, [&](void* p) { new (p) value_type(val); });
}
- void makeSize(size_type size, value_type* v = NULL) {
+ void makeSize(size_type size, value_type* v = nullptr) {
makeSize(size, v, size - 1);
}
detail::shiftPointer(newh, kHeapifyCapacitySize) :
newh);
- if (v != NULL) {
+ if (v != nullptr) {
// move new element
try {
new (&newp[pos]) value_type(std::move(*v));
} catch (...) {
- std::free(newh);
+ free(newh);
throw;
}
detail::moveToUninitialized(begin(), begin() + pos, newp);
} catch (...) {
newp[pos].~value_type();
- std::free(newh);
+ free(newh);
throw;
}
for (size_type i = 0; i <= pos; ++i) {
newp[i].~value_type();
}
- std::free(newh);
+ free(newh);
throw;
}
} else {
try {
detail::moveToUninitialized(begin(), end(), newp);
} catch (...) {
- std::free(newh);
+ free(newh);
throw;
}
}
InternalSizeType* getCapacity() {
return &capacity_;
}
- } FB_PACKED;
+ } FOLLY_PACK_ATTR;
struct HeapPtr {
// Lower order bit of heap_ is used as flag to indicate whether capacity is
return static_cast<InternalSizeType*>(
detail::pointerFlagClear(heap_));
}
- } FB_PACKED;
+ } FOLLY_PACK_ATTR;
-#if defined(__x86_64_)
- typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline];
+#if (FOLLY_X64 || FOLLY_PPC64)
+ typedef unsigned char InlineStorageDataType[sizeof(value_type) * MaxInline];
#else
typedef typename std::aligned_storage<
sizeof(value_type) * MaxInline,
alignof(value_type)
- >::type InlineStorageType;
+ >::type InlineStorageDataType;
#endif
+ typedef typename std::conditional<
+ sizeof(value_type) * MaxInline != 0,
+ InlineStorageDataType,
+ void*
+ >::type InlineStorageType;
+
static bool const kHasInlineCapacity =
sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType);
void freeHeap() {
auto vp = detail::pointerFlagClear(pdata_.heap_);
- std::free(vp);
+ free(vp);
}
- } FB_PACKED u;
-} FB_PACKED;
+ } FOLLY_PACK_ATTR u;
+} FOLLY_PACK_ATTR;
+FOLLY_PACK_POP
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
-}
+namespace detail {
-#ifdef FB_PACKED
-# undef FB_PACKED
-#endif
+// Format support.
+template <class T, size_t M, class A, class B, class C>
+struct IndexableTraits<small_vector<T, M, A, B, C>>
+ : public IndexableTraitsSeq<small_vector<T, M, A, B, C>> {
+};
-#endif
+} // namespace detail
+
+} // namespace folly
+
+#pragma GCC diagnostic pop