From: Yedidya Feldblum
Date: Sun, 30 Jul 2017 02:30:13 +0000 (-0700)
Subject: template< -> template <
X-Git-Tag: v2017.07.31.00~6
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=commitdiff_plain;h=fbfe105970bcf88e8c123046f84bebdfe24f8801

template< -> template <

Summary: [Folly] `template<` -> `template <`. And then apply `clang-format` style to affected `template <...>` lines.

Reviewed By: andrewjcg

Differential Revision: D5524792

fbshipit-source-id: 6614eecf384bf3e3ccc2f0cc7c5334a0cb9c76af
---

diff --git a/folly/AtomicHashArray-inl.h b/folly/AtomicHashArray-inl.h
index d2cb08f4..ce857a92 100644
--- a/folly/AtomicHashArray-inl.h
+++ b/folly/AtomicHashArray-inl.h
@@ -370,7 +370,7 @@ struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn, Ke
   // Conversion ctor for interoperability between const_iterator and
   // iterator.  The enable_if<> magic keeps us well-behaved for
   // is_convertible<> (v. the iterator_facade documentation).
-  template<class OtherContT, class OtherVal>
+  template <class OtherContT, class OtherVal>
   aha_iterator(const aha_iterator<OtherContT, OtherVal>& o,
                typename std::enable_if<
                  std::is_convertible<OtherVal*, IterVal*>::value
                >::type* = 0)
diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h
index e590dad5..7dce3a0e 100644
--- a/folly/AtomicHashArray.h
+++ b/folly/AtomicHashArray.h
@@ -127,7 +127,7 @@ class AtomicHashArray : boost::noncopyable {
   const KeyT kLockedKey_;
   const KeyT kErasedKey_;
 
-  template<class ContT, class IterVal>
+  template <class ContT, class IterVal>
   struct aha_iterator;
 
   typedef aha_iterator<const AtomicHashArray, const value_type> const_iterator;
diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index e0031266..25ca05e1 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -456,7 +456,7 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn, KeyC
   // Conversion ctor for interoperability between const_iterator and
   // iterator.  The enable_if<> magic keeps us well-behaved for
   // is_convertible<> (v. the iterator_facade documentation).
-  template<class OtherContT, class OtherVal, class OtherSubIt>
+  template <class OtherContT, class OtherVal, class OtherSubIt>
   ahm_iterator(const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
                typename std::enable_if<
                  std::is_convertible<OtherSubIt, SubIt>::value
                >::type* = 0)
diff --git a/folly/AtomicHashMap.h b/folly/AtomicHashMap.h
index 962e9de9..be683e58 100644
--- a/folly/AtomicHashMap.h
+++ b/folly/AtomicHashMap.h
@@ -155,8 +155,14 @@ struct AtomicHashMapFullError : std::runtime_error {
   {}
 };
 
-template<class KeyT, class ValueT, class HashFcn, class EqualFcn,
-         class Allocator, class ProbeFcn, class KeyConvertFcn>
+template <
+    class KeyT,
+    class ValueT,
+    class HashFcn,
+    class EqualFcn,
+    class Allocator,
+    class ProbeFcn,
+    class KeyConvertFcn>
 class AtomicHashMap : boost::noncopyable {
   typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
                           Allocator, ProbeFcn, KeyConvertFcn> SubMap;
@@ -176,7 +182,7 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
 
-  template<class ContT, class IterVal, class SubIt>
+  template <class ContT, class IterVal, class SubIt>
   struct ahm_iterator;
 
   typedef ahm_iterator<const AtomicHashMap,
                        const value_type,
                        typename SubMap::const_iterator>
       const_iterator;
diff --git a/folly/AtomicStruct.h b/folly/AtomicStruct.h
--- a/folly/AtomicStruct.h
+++ b/folly/AtomicStruct.h
@@ … @@ struct AtomicStructIntPick {};
 
 /// AtomicStruct<T> work like C++ atomics, but can be used on any POD
 /// type <= 8 bytes.
 template <
     typename T,
-    template<typename> class Atom = std::atomic,
+    template <typename> class Atom = std::atomic,
     typename Raw = typename detail::AtomicStructIntPick<sizeof(T)>::type>
 class AtomicStruct {
   static_assert(alignof(T) <= alignof(Raw),
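For illustration, the transformation is purely mechanical; on a hypothetical declaration (not taken from the patch) it looks like this:

    #include <functional>

    // Before: no space after the "template" keyword.
    template<class Key, class Value, class Hash = std::hash<Key>>
    class ExampleBefore;

    // After: a space after "template"; when the parameter list no longer
    // fits on one line, clang-format gives each parameter its own line,
    // indented four spaces.
    template <
        class Key,
        class Value,
        class Hash = std::hash<Key>>
    class ExampleAfter;

Short single-line parameter lists keep their one-line form, which is why many hunks below change only the space after the keyword.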
diff --git a/folly/AtomicUnorderedMap.h b/folly/AtomicUnorderedMap.h
index 3a13aa9f..ee39006d 100644
--- a/folly/AtomicUnorderedMap.h
+++ b/folly/AtomicUnorderedMap.h
@@ -129,16 +129,17 @@ namespace folly {
 /// which is much faster than destructing all of the keys and values.
 /// Feel free to override if std::is_trivial_destructor isn't recognizing
 /// the triviality of your destructors.
-template <typename Key,
-          typename Value,
-          typename Hash = std::hash<Key>,
-          typename KeyEqual = std::equal_to<Key>,
-          bool SkipKeyValueDeletion =
-              (boost::has_trivial_destructor<Key>::value &&
-               boost::has_trivial_destructor<Value>::value),
-          template<typename> class Atom = std::atomic,
-          typename IndexType = uint32_t,
-          typename Allocator = folly::detail::MMapAlloc>
+template <
+    typename Key,
+    typename Value,
+    typename Hash = std::hash<Key>,
+    typename KeyEqual = std::equal_to<Key>,
+    bool SkipKeyValueDeletion =
+        (boost::has_trivial_destructor<Key>::value &&
+         boost::has_trivial_destructor<Value>::value),
+    template <typename> class Atom = std::atomic,
+    typename IndexType = uint32_t,
+    typename Allocator = folly::detail::MMapAlloc>
 struct AtomicUnorderedInsertMap {
 
   typedef Key key_type;
@@ -262,7 +263,7 @@ struct AtomicUnorderedInsertMap {
   ///   auto value = memo.findOrConstruct(key, [=](void* raw) {
   ///     new (raw) std::string(computation(key));
   ///   })->first;
-  template<typename Func>
+  template <typename Func>
   std::pair<const_iterator,bool> findOrConstruct(const Key& key, Func&& func) {
     auto const slot = keyToSlotIdx(key);
     auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
@@ -314,7 +315,7 @@ struct AtomicUnorderedInsertMap {
   /// Eventually we can duplicate all of the std::pair constructor
   /// forms, including a recursive tuple forwarding template
   /// http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
-  template<class K, class V>
+  template <class K, class V>
   std::pair<const_iterator,bool> emplace(const K& key, V&& value) {
     return findOrConstruct(key, [&](void* raw) {
       new (raw) Value(std::forward<V>(value));
@@ -501,8 +502,7 @@ using AtomicUnorderedInsertMap64 =
 /// updating values inserted into an AtomicUnorderedInsertMap<K,
 /// MutableAtom<V>>.  This relies on AtomicUnorderedInsertMap's guarantee
 /// that it doesn't move values.
-template <typename T,
-          template<typename> class Atom = std::atomic>
+template <typename T, template <typename> class Atom = std::atomic>
 struct MutableAtom {
   mutable Atom<T> data;
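For context on the two insertion entry points touched above, a usage sketch; the key/value types and the capacity of 1000 are illustrative choices, not taken from the patch:

    #include <folly/AtomicUnorderedMap.h>

    #include <new>
    #include <string>

    void example() {
      // Capacity is fixed at construction time.
      folly::AtomicUnorderedInsertMap<int, std::string> memo(1000);

      // findOrConstruct constructs the value in place only if the key is
      // absent; it returns (iterator, inserted) like std::map::insert.
      auto result = memo.findOrConstruct(42, [](void* raw) {
        new (raw) std::string("forty-two");
      });

      // emplace is the convenience wrapper layered on findOrConstruct.
      memo.emplace(7, std::string("seven"));
      (void)result;
    }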
diff --git a/folly/ConcurrentSkipList-inl.h b/folly/ConcurrentSkipList-inl.h
index 61279a96..ac6023aa 100644
--- a/folly/ConcurrentSkipList-inl.h
+++ b/folly/ConcurrentSkipList-inl.h
@@ -38,9 +38,9 @@ namespace folly { namespace detail {
 
-template<typename ValT, typename NodeT> class csl_iterator;
+template <typename ValT, typename NodeT> class csl_iterator;
 
-template<typename T>
+template <typename T>
 class SkipListNode : private boost::noncopyable {
   enum : uint16_t {
     IS_HEAD_NODE = 1,
@@ -51,8 +51,11 @@ class SkipListNode : private boost::noncopyable {
  public:
   typedef T value_type;
 
-  template<typename NodeAlloc, typename U,
-    typename = typename std::enable_if<IsArenaAllocator<NodeAlloc>::value>::type>
+  template <
+      typename NodeAlloc,
+      typename U,
+      typename =
+          typename std::enable_if<IsArenaAllocator<NodeAlloc>::value>::type>
   static SkipListNode* create(
       NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
     DCHECK(height >= 1 && height < 64) << height;
@@ -65,13 +68,13 @@ class SkipListNode : private boost::noncopyable {
     return node;
   }
 
-  template<typename NodeAlloc>
+  template <typename NodeAlloc>
   static void destroy(NodeAlloc& alloc, SkipListNode* node) {
     node->~SkipListNode();
     alloc.deallocate(node);
   }
 
-  template<typename NodeAlloc>
+  template <typename NodeAlloc>
   struct DestroyIsNoOp : std::integral_constant<bool,
     IsArenaAllocator<NodeAlloc>::value &&
     boost::has_trivial_destructor<SkipListNode>::value> { };
@@ -130,7 +133,7 @@ class SkipListNode : private boost::noncopyable {
 
  private:
   // Note! this can only be called from create() as a placement new.
-  template<typename U>
+  template <typename U>
   SkipListNode(uint8_t height, U&& data, bool isHead) :
       height_(height), data_(std::forward<U>(data)) {
     spinLock_.init();
@@ -226,10 +229,10 @@ class SkipListRandomHeight {
   size_t sizeLimitTable_[kMaxHeight];
 };
 
-template<typename NodeType, typename NodeAlloc, typename = void>
+template <typename NodeType, typename NodeAlloc, typename = void>
 class NodeRecycler;
 
-template<typename NodeType, typename NodeAlloc>
+template <typename NodeType, typename NodeAlloc>
 class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
   !NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
  public:
@@ -315,7 +318,7 @@ class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
 };
 
-template<typename NodeType, typename NodeAlloc>
+template <typename NodeType, typename NodeAlloc>
 class NodeRecycler<NodeType, NodeAlloc, typename std::enable_if<
   NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
  public:
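The iterators in this patch (aha_iterator, ahm_iterator, and csl_iterator in the next file) all share the same enable_if-gated conversion constructor: it lets a mutable iterator convert to a const iterator but not the reverse. A minimal standalone sketch of the pattern, with illustrative names:

    #include <type_traits>

    template <class Val>
    class iter {
     public:
      explicit iter(Val* p = nullptr) : p_(p) {}

      // Participates in overload resolution only when OtherVal* converts
      // to Val*, so iter<T> converts to iter<const T>, never the reverse.
      template <class OtherVal>
      iter(const iter<OtherVal>& o,
           typename std::enable_if<
               std::is_convertible<OtherVal*, Val*>::value>::type* = nullptr)
          : p_(o.p_) {}

     private:
      template <class> friend class iter;
      Val* p_;
    };

    // iter<int> it;
    // iter<const int> cit = it;  // fine: int* -> const int*
    // iter<int> bad = cit;       // rejected: const int* -> int*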
diff --git a/folly/ConcurrentSkipList.h b/folly/ConcurrentSkipList.h
index a1a694a6..1a2fdadc 100644
--- a/folly/ConcurrentSkipList.h
+++ b/folly/ConcurrentSkipList.h
@@ -135,12 +135,13 @@ Sample usage:
 
 namespace folly {
 
-template<typename T,
-         typename Comp = std::less<T>,
-         // All nodes are allocated using provided SimpleAllocator,
-         // it should be thread-safe.
-         typename NodeAlloc = SysAlloc,
-         int MAX_HEIGHT = 24>
+template <
+    typename T,
+    typename Comp = std::less<T>,
+    // All nodes are allocated using provided SimpleAllocator,
+    // it should be thread-safe.
+    typename NodeAlloc = SysAlloc,
+    int MAX_HEIGHT = 24>
 class ConcurrentSkipList {
   // MAX_HEIGHT needs to be at least 2 to suppress compiler
   // warnings/errors (Werror=uninitialized tiggered due to preds_[1]
@@ -297,7 +298,7 @@ class ConcurrentSkipList {
   //   list with the same key.
   // pair.second stores whether the data is added successfully:
   //   0 means not added, otherwise reutrns the new size.
-  template<typename U>
+  template <typename U>
   std::pair<NodeType*, size_t> addOrGetData(U &&data) {
     NodeType *preds[MAX_HEIGHT], *succs[MAX_HEIGHT];
     NodeType *newNode;
@@ -518,7 +519,7 @@ class ConcurrentSkipList {
   std::atomic<size_t> size_;
 };
 
-template<typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
+template <typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
 class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
   typedef detail::SkipListNode<T> NodeType;
   typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
@@ -593,8 +594,10 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
   const_iterator cbegin() const { return begin(); }
   const_iterator cend() const { return end(); }
 
-  template<typename U,
-    typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
+  template <
+      typename U,
+      typename =
+          typename std::enable_if<std::is_convertible<U, T>::value>::type>
   std::pair<iterator, bool> insert(U&& data) {
     auto ret = sl_->addOrGetData(std::forward<U>(data));
     return std::make_pair(iterator(ret.first), ret.second);
@@ -652,7 +655,7 @@ class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Accessor {
 };
 
 // implements forward iterator concept.
-template<typename ValT, typename NodeT>
+template <typename ValT, typename NodeT>
 class detail::csl_iterator :
   public boost::iterator_facade<csl_iterator<ValT, NodeT>,
                                 ValT, boost::forward_traversal_tag> {
@@ -664,7 +667,7 @@ class detail::csl_iterator :
 
   explicit csl_iterator(NodeT* node = nullptr) : node_(node) {}
 
-  template<typename OtherVal, typename OtherNode>
+  template <typename OtherVal, typename OtherNode>
   csl_iterator(const csl_iterator<OtherVal, OtherNode> &other,
     typename std::enable_if<
       std::is_convertible<OtherVal, ValT>::value>::type* = 0)
    : node_(other.node_) {}
@@ -678,7 +681,7 @@ class detail::csl_iterator :
  private:
   friend class boost::iterator_core_access;
-  template<class, class> friend class csl_iterator;
+  template <class, class> friend class csl_iterator;
 
   void increment() { node_ = node_->next(); }
   bool equal(const csl_iterator& other) const { return node_ == other.node_; }
@@ -688,7 +691,7 @@ class detail::csl_iterator :
 };
 
 // Skipper interface
-template<typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
+template <typename T, typename Comp, typename NodeAlloc, int MAX_HEIGHT>
 class ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT>::Skipper {
   typedef detail::SkipListNode<T> NodeType;
   typedef ConcurrentSkipList<T, Comp, NodeAlloc, MAX_HEIGHT> SkipListType;
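A usage sketch for the Accessor API whose insert() was reformatted above, loosely following the header's own "Sample usage" comment; the head height of 10 is an arbitrary tuning choice:

    #include <folly/ConcurrentSkipList.h>

    #include <memory>

    void example() {
      typedef folly::ConcurrentSkipList<int> SkipListT;
      std::shared_ptr<SkipListT> sl(SkipListT::createInstance(10));

      // All reads and writes go through an Accessor, which keeps the list
      // alive and pins its nodes against recycling while in scope.
      SkipListT::Accessor accessor(sl);
      accessor.insert(23);             // the enable_if-gated insert() above
      bool found = accessor.contains(23);

      for (int elem : accessor) {      // forward iteration via csl_iterator
        (void)elem;
      }
      (void)found;
    }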
diff --git a/folly/Conv.h b/folly/Conv.h
index 72518d80..d70b1d5b 100644
--- a/folly/Conv.h
+++ b/folly/Conv.h
@@ -400,7 +400,7 @@ void toAppend(char value, Tgt * result) {
   *result += value;
 }
 
-template<class T>
+template <class T>
 constexpr typename std::enable_if<
   std::is_same<T, char>::value,
   size_t>::type
@@ -435,7 +435,7 @@ typename std::enable_if<std::is_convertible<Src, const char*>::value, size_t>::
   return 0;
 }
 
-template<class Src>
+template <class Src>
 typename std::enable_if<
   (std::is_convertible<Src, folly::StringPiece>::value ||
   IsSomeString<Src>::value) &&
@@ -450,7 +450,7 @@ inline size_t estimateSpaceNeeded(std::nullptr_t /* value */) {
   return 0;
 }
 
-template<class Src>
+template <class Src>
 typename std::enable_if<
   std::is_pointer<Src>::value &&
   IsSomeString<std::remove_pointer<Src>>::value,
@@ -523,7 +523,7 @@ toAppend(unsigned __int128 value, Tgt * result) {
   result->append(buffer + p, buffer + sizeof(buffer));
 }
 
-template<class T>
+template <class T>
 constexpr typename std::enable_if<
   std::is_same<T, __int128>::value,
   size_t>::type
@@ -531,7 +531,7 @@ estimateSpaceNeeded(T) {
   return detail::digitsEnough<__int128>();
 }
 
-template<class T>
+template <class T>
 constexpr typename std::enable_if<
   std::is_same<T, unsigned __int128>::value,
   size_t>::type
@@ -742,7 +742,7 @@ estimateSpaceNeeded(Src value) {
  * for estimateSpaceNeed for your type, so that we allocate
  * as much as you need instead of the default
  */
-template<class T, class Enable = void>
+template <class T, class Enable = void>
 struct HasLengthEstimator : std::false_type {};
 
 template <class Src>
@@ -776,12 +776,12 @@ size_t estimateSpaceToReserve(size_t sofar, const T& v, const Ts&... vs) {
   return estimateSpaceToReserve(sofar + estimateSpaceNeeded(v), vs...);
 }
 
-template<class...Ts>
+template <class... Ts>
 void reserveInTarget(const Ts&...vs) {
   getLastElement(vs...)->reserve(estimateSpaceToReserve(0, vs...));
 }
 
-template<class Delimiter, class...Ts>
+template <class Delimiter, class... Ts>
 void reserveInTargetDelim(const Delimiter& d, const Ts&...vs) {
   static_assert(sizeof...(vs) >= 2, "Needs at least 2 args");
   size_t fordelim = (sizeof...(vs) - 2) *
diff --git a/folly/DynamicConverter.h b/folly/DynamicConverter.h
index f29737a8..e63bf842 100644
--- a/folly/DynamicConverter.h
+++ b/folly/DynamicConverter.h
@@ -100,7 +100,7 @@ using is_associative = StrictConjunction<is_range<T>, has_key_type<T>>;
 
 namespace dynamicconverter_detail {
 
-template<typename T>
+template <typename T>
 struct Dereferencer {
   static inline void derefToCache(
       Optional<T>* /* mem */,
@@ -115,7 +115,7 @@ struct Dereferencer {
   }
 };
 
-template<typename F, typename S>
+template <typename F, typename S>
 struct Dereferencer<std::pair<const F, S>> {
   static inline void derefToCache(
       Optional<std::pair<const F, S>>* mem,
@@ -356,7 +356,7 @@ struct DynamicConstructor<
 };
 
 // pair
-template<typename A, typename B>
+template <typename A, typename B>
 struct DynamicConstructor<std::pair<A, B>, void> {
   static dynamic construct(const std::pair<A, B>& x) {
     dynamic d = dynamic::array;
@@ -374,7 +374,7 @@ T convertTo(const dynamic& d) {
   return DynamicConverter<typename std::remove_cv<T>::type>::convert(d);
 }
 
-template<typename T>
+template <typename T>
 dynamic toDynamic(const T& x) {
   return DynamicConstructor<typename std::remove_cv<T>::type>::construct(x);
 }
diff --git a/folly/FBString.h b/folly/FBString.h
index 5d32df53..b36bb948 100644
--- a/folly/FBString.h
+++ b/folly/FBString.h
@@ -1404,7 +1404,7 @@ public:
 
   basic_fbstring& append(size_type n, value_type c);
 
-  template<class InputIterator>
+  template <class InputIterator>
   basic_fbstring& append(InputIterator first, InputIterator last) {
     insert(end(), first, last);
     return *this;
diff --git a/folly/Hash.h b/folly/Hash.h
index e8b75df3..78bb5f48 100644
--- a/folly/Hash.h
+++ b/folly/Hash.h
@@ -370,7 +370,7 @@ inline uint32_t hsieh_hash32_str(const std::string& str) {
 
 } // namespace hash
 
-template<class Key, class Enable = void>
+template <class Key, class Enable = void>
 struct hasher;
 
 struct Hash {
@@ -393,43 +393,43 @@ struct hasher<bool> {
   }
 };
 
-template<> struct hasher<int32_t> {
+template <> struct hasher<int32_t> {
   size_t operator()(int32_t key) const {
     return hash::jenkins_rev_mix32(uint32_t(key));
   }
 };
 
-template<> struct hasher<uint32_t> {
+template <> struct hasher<uint32_t> {
   size_t operator()(uint32_t key) const {
     return hash::jenkins_rev_mix32(key);
   }
 };
 
-template<> struct hasher<int16_t> {
+template <> struct hasher<int16_t> {
   size_t operator()(int16_t key) const {
     return hasher<int32_t>()(key); // as impl accident, sign-extends
   }
 };
 
-template<> struct hasher<uint16_t> {
+template <> struct hasher<uint16_t> {
   size_t operator()(uint16_t key) const {
     return hasher<uint32_t>()(key);
   }
 };
 
-template<> struct hasher<int8_t> {
+template <> struct hasher<int8_t> {
   size_t operator()(int8_t key) const {
     return hasher<int32_t>()(key); // as impl accident, sign-extends
   }
 };
 
-template<> struct hasher<uint8_t> {
+template <> struct hasher<uint8_t> {
   size_t operator()(uint8_t key) const {
     return hasher<uint32_t>()(key);
   }
 };
 
-template<> struct hasher<char> {
+template <> struct hasher<char> {
   using explicit_type =
       std::conditional<std::is_signed<char>::value, int8_t, uint8_t>::type;
   size_t operator()(char key) const {
@@ -437,19 +437,19 @@ template<> struct hasher<char> {
   }
 };
 
-template<> struct hasher<int64_t> {
+template <> struct hasher<int64_t> {
   size_t operator()(int64_t key) const {
     return static_cast<size_t>(hash::twang_mix64(uint64_t(key)));
   }
 };
 
-template<> struct hasher<uint64_t> {
+template <> struct hasher<uint64_t> {
   size_t operator()(uint64_t key) const {
     return static_cast<size_t>(hash::twang_mix64(key));
   }
 };
 
-template<> struct hasher<std::string> {
+template <> struct hasher<std::string> {
   size_t operator()(const std::string& key) const {
     return static_cast<size_t>(
         hash::SpookyHashV2::Hash64(key.data(), key.size(), 0));
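For context, a sketch of how the hasher specializations reformatted above are typically invoked; the values are arbitrary:

    #include <folly/Hash.h>

    #include <cstdint>
    #include <string>

    void example() {
      // Each call dispatches to one of the explicit specializations above.
      size_t h32 = folly::hasher<int32_t>()(12345);      // jenkins_rev_mix32
      size_t h64 = folly::hasher<uint64_t>()(99999ULL);  // twang_mix64
      size_t hs = folly::hasher<std::string>()("abc");   // SpookyHashV2

      // folly::Hash is the generic front-end; with multiple arguments it
      // hashes each one via hasher<T> and combines the results.
      size_t combined = folly::Hash()(12345, std::string("abc"));
      (void)h32; (void)h64; (void)hs; (void)combined;
    }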
diff --git a/folly/IPAddress.h b/folly/IPAddress.h
index 4ed42a98..372d9cbc 100644
--- a/folly/IPAddress.h
+++ b/folly/IPAddress.h
@@ -460,7 +460,7 @@ inline bool operator>=(const IPAddress& a, const IPAddress& b) {
 
 } // folly
 
 namespace std {
-template<>
+template <>
 struct hash<folly::IPAddress> {
   size_t operator()(const folly::IPAddress& addr) const {
     return addr.hash();
diff --git a/folly/IPAddressV4.h b/folly/IPAddressV4.h
index 1e2c65e7..b6390f8c 100644
--- a/folly/IPAddressV4.h
+++ b/folly/IPAddressV4.h
@@ -311,7 +311,7 @@ inline bool operator>=(const IPAddressV4& a, const IPAddressV4& b) {
 
 } // folly
 
 namespace std {
-template<>
+template <>
 struct hash<folly::IPAddressV4> {
   size_t operator()(const folly::IPAddressV4 addr) const {
     return addr.hash();
diff --git a/folly/IPAddressV6.h b/folly/IPAddressV6.h
index d0c5cd60..d2fd02a9 100644
--- a/folly/IPAddressV6.h
+++ b/folly/IPAddressV6.h
@@ -398,7 +398,7 @@ void toAppend(IPAddressV6 addr, fbstring* result);
 
 } // folly
 
 namespace std {
-template<>
+template <>
 struct hash<folly::IPAddressV6> {
   size_t operator()(const folly::IPAddressV6& addr) const {
     return addr.hash();
diff --git a/folly/IntrusiveList.h b/folly/IntrusiveList.h
index 0c799f1b..ffd5c9b7 100644
--- a/folly/IntrusiveList.h
+++ b/folly/IntrusiveList.h
@@ -61,7 +61,7 @@ using IntrusiveListHook = boost::intrusive::list_member_hook<
  * The elements stored in the list must contain an IntrusiveListHook member
  * variable.
  */
-template<typename T, IntrusiveListHook T::*PtrToMember>
+template <typename T, IntrusiveListHook T::*PtrToMember>
 using IntrusiveList = boost::intrusive::list<
     T,
     boost::intrusive::member_hook<T, IntrusiveListHook, PtrToMember>,
@@ -109,7 +109,7 @@ using SafeIntrusiveListHook = boost::intrusive::list_member_hook<
  * The elements stored in the list must contain an SafeIntrusiveListHook member
  * variable.
  */
-template<typename T, SafeIntrusiveListHook T::*PtrToMember>
+template <typename T, SafeIntrusiveListHook T::*PtrToMember>
 using CountedIntrusiveList = boost::intrusive::list<
     T,
     boost::intrusive::member_hook<T, SafeIntrusiveListHook, PtrToMember>,
diff --git a/folly/Lazy.h b/folly/Lazy.h
index 32723bc1..963dd479 100644
--- a/folly/Lazy.h
+++ b/folly/Lazy.h
@@ -86,7 +86,7 @@ namespace folly {
 
 namespace detail {
 
-template<class Func>
+template <class Func>
 struct Lazy {
   typedef typename std::result_of<Func()>::type result_type;
 
@@ -120,7 +120,7 @@ private:
 
 //////////////////////////////////////////////////////////////////////
 
-template<class Func>
+template <class Func>
 detail::Lazy<typename std::remove_reference<Func>::type>
 lazy(Func&& fun) {
   return detail::Lazy<typename std::remove_reference<Func>::type>(
diff --git a/folly/LifoSem.h b/folly/LifoSem.h
index 8fc46ae2..90176094 100644
--- a/folly/LifoSem.h
+++ b/folly/LifoSem.h
@@ -31,8 +31,9 @@
 
 namespace folly {
 
-template <template<typename> class Atom = std::atomic,
-          class BatonType = Baton<Atom>>
+template <
+    template <typename> class Atom = std::atomic,
+    class BatonType = Baton<Atom>>
 struct LifoSemImpl;
 
 /// LifoSem is a semaphore that wakes its waiters in a manner intended to
@@ -114,7 +115,7 @@ namespace detail {
 
 /// LifoSemRawNode is the actual pooled storage that backs LifoSemNode
 /// for user-specified Handoff types. This is done so that we can have
 /// a large static IndexedMemPool of nodes, instead of per-type pools
-template <template<typename> class Atom>
+template