X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=cds%2Fintrusive%2Fsplit_list_rcu.h;h=6f264cceb5c8f2bcd052ee058de96df6ecaa7cbd;hb=eb9f6566c16b481e1ac6bb516097fc0bdc2a55c6;hp=4eeb2b0c0ceac3bbe3f3386cc15f90443f62afec;hpb=785a8577b9d9ad4988c494055a9a95f1cbf62d65;p=libcds.git diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h index 4eeb2b0c..6f264cce 100644 --- a/cds/intrusive/split_list_rcu.h +++ b/cds/intrusive/split_list_rcu.h @@ -1,9 +1,39 @@ -//$$CDS-header$$ - -#ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H -#define __CDS_INTRUSIVE_SPLIT_LIST_RCU_H - -#include +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H +#define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H + +#include + +#include #include namespace cds { namespace intrusive { @@ -27,10 +57,10 @@ namespace cds { namespace intrusive { - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the comparing functor for the type \p T and other features specific for the ordered list. - - \p Traits - type traits. See split_list::type_traits for explanation. - Instead of defining \p Traits struct you may use option-based syntax with split_list::make_traits metafunction. + - \p Traits - set traits, default isd \p split_list::traits. + Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction. - @note About features of hash functor needed for \p %SplitList see \ref cds_SplitList_hash_functor "SplitList general description". + @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description". 
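 For orientation, a minimal declaration sketch with \p general_buffered RCU and \p MichaelList buckets might look as follows. It is only a sketch: the \p foo* names are illustrative, and the exact hook/traits options depend on your configuration (see also the "How to use" notes below).
 \code
 #include <cstddef>
 #include <functional>   // std::hash

 #include <cds/urcu/general_buffered.h>
 #include <cds/intrusive/michael_list_rcu.h>
 #include <cds/intrusive/split_list_rcu.h>

 namespace ci = cds::intrusive;
 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gpb;

 // The item embeds a split-list node that wraps the Michael-list node
 struct foo: public ci::split_list::node< ci::michael_list::node< rcu_gpb > >
 {
     int nKey;
     int nValue;
 };

 // Hash functor: should accept both the item and the search key type
 struct foo_hash {
     size_t operator()( foo const& v ) const { return std::hash<int>()( v.nKey ); }
     size_t operator()( int k ) const        { return std::hash<int>()( k ); }
 };

 // Ordering predicate for the bucket list
 struct foo_less {
     bool operator()( foo const& l, foo const& r ) const { return l.nKey < r.nKey; }
     bool operator()( foo const& l, int r ) const        { return l.nKey < r; }
     bool operator()( int l, foo const& r ) const        { return l < r.nKey; }
 };

 // Disposer called (asynchronously) for removed items
 struct foo_disposer {
     void operator()( foo* p ) const { delete p; }
 };

 // Bucket type: intrusive MichaelList specialized for the same RCU type
 typedef ci::MichaelList< rcu_gpb, foo,
     ci::michael_list::make_traits<
         ci::opt::hook< ci::michael_list::base_hook< cds::opt::gc< rcu_gpb > > >
         , cds::opt::less< foo_less >
         , ci::opt::disposer< foo_disposer >
     >::type
 > bucket_list;

 // The split-list set itself
 typedef ci::SplitListSet< rcu_gpb, bucket_list,
     ci::split_list::make_traits<
         cds::opt::hash< foo_hash >
     >::type
 > foo_set;
 \endcode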
\par How to use Before including you should include appropriate RCU header file, @@ -54,7 +84,7 @@ namespace cds { namespace intrusive { class RCU, class OrderedList, # ifdef CDS_DOXYGEN_INVOKED - class Traits = split_list::type_traits + class Traits = split_list::traits # else class Traits # endif @@ -62,39 +92,41 @@ namespace cds { namespace intrusive { class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits > { public: - typedef Traits options ; ///< Traits template parameters - typedef cds::urcu::gc< RCU > gc ; ///< RCU garbage collector + typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector + typedef Traits traits; ///< Traits template parameters /// Hash functor for \ref value_type and all its derivatives that you use - typedef typename cds::opt::v::hash_selector< typename options::hash >::type hash; + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; protected: //@cond - typedef split_list::details::rebind_list_options wrapped_ordered_list; + typedef split_list::details::rebind_list_traits wrapped_ordered_list; //@endcond public: # ifdef CDS_DOXYGEN_INVOKED - typedef OrderedList ordered_list ; ///< type of ordered list used as base for split-list + typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list # else typedef typename wrapped_ordered_list::result ordered_list; # endif - typedef typename ordered_list::value_type value_type ; ///< type of value stored in the split-list - typedef typename ordered_list::key_comparator key_comparator ; ///< key compare functor - typedef typename ordered_list::disposer disposer ; ///< Node disposer functor - typedef typename ordered_list::rcu_lock rcu_lock ; ///< RCU scoped lock - typedef typename ordered_list::exempt_ptr exempt_ptr ; ///< pointer to extracted node + typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor + typedef typename ordered_list::disposer disposer; ///< Node disposer functor + typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock + typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node + typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that - static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; + static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; - typedef typename options::item_counter item_counter ; ///< Item counter type - typedef typename options::back_off back_off ; ///< back-off strategy for spinning - typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::back_off back_off; ///< back-off strategy for spinning + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::stat stat; ///< Internal statistics protected: - typedef typename ordered_list::node_type list_node_type ; ///< Node type as declared in ordered list - typedef split_list::node node_type ; ///< split-list node type - typedef node_type dummy_node_type ; ///< dummy node type + typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list + typedef split_list::node node_type; ///< split-list node type + typedef node_type aux_node_type; ///< dummy node type /// Split-list node traits /** @@ -106,10 +138,10 @@ namespace cds { namespace intrusive { //@cond /// Bucket table implementation typedef typename split_list::details::bucket_table_selector< - options::dynamic_bucket_table + traits::dynamic_bucket_table , gc - , dummy_node_type - , opt::allocator< typename options::allocator > + , aux_node_type + , opt::allocator< typename traits::allocator > , opt::memory_model< memory_model > >::type bucket_table; @@ -121,10 +153,10 @@ namespace cds { namespace intrusive { class ordered_list_wrapper: public ordered_list { typedef ordered_list base_class; - typedef typename base_class::auxiliary_head bucket_head_type; + typedef typename base_class::auxiliary_head bucket_head_type; public: - bool insert_at( dummy_node_type * pHead, value_type& val ) + bool insert_at( aux_node_type * pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -132,7 +164,7 @@ namespace cds { namespace intrusive { } template - bool insert_at( dummy_node_type * pHead, value_type& val, Func f ) + bool insert_at( aux_node_type * pHead, value_type& val, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -140,14 +172,14 @@ namespace cds { namespace intrusive { } template - std::pair ensure_at( dummy_node_type * pHead, value_type& val, Func func ) + std::pair update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert ) { assert( pHead != nullptr ); bucket_head_type h(pHead); - return base_class::ensure_at( h, val, func ); + return base_class::update_at( h, val, func, bAllowInsert ); } - bool unlink_at( dummy_node_type * pHead, value_type& val ) + bool unlink_at( aux_node_type * pHead, value_type& val ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -155,7 +187,7 @@ namespace cds { namespace intrusive { } template - bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) + bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -163,7 +195,7 @@ namespace cds { namespace intrusive { } template - bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -171,7 +203,7 @@ namespace cds { namespace intrusive { } template - value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) + value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -179,7 +211,7 @@ namespace cds { namespace intrusive { } template - bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) const + bool find_at( 
aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -187,7 +219,7 @@ namespace cds { namespace intrusive { } template - bool find_at( dummy_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) const + bool find_at( aux_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); @@ -195,18 +227,18 @@ namespace cds { namespace intrusive { } template - value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) const + raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) { assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::get_at( h, val, cmp ); } - bool insert_aux_node( dummy_node_type * pNode ) + bool insert_aux_node( aux_node_type * pNode ) { return base_class::insert_aux_node( pNode ); } - bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode ) + bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) { bucket_head_type h(pHead); return base_class::insert_aux_node( h, pNode ); @@ -233,22 +265,27 @@ namespace cds { namespace intrusive { //@endcond protected: - ordered_list_wrapper m_List ; ///< Ordered list containing split-list items - bucket_table m_Buckets ; ///< bucket table - atomics::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) - item_counter m_ItemCounter ; ///< Item counter - hash m_HashFunctor ; ///< Hash functor + ordered_list_wrapper m_List; ///< Ordered list containing split-list items + bucket_table m_Buckets; ///< bucket table + atomics::atomic m_nBucketCountLog2; ///< log2( current bucket count ) + atomics::atomic m_nMaxItemCount; ///< number of items container can hold, before we have to resize + item_counter m_ItemCounter; ///< Item counter + hash m_HashFunctor; ///< Hash functor + stat m_Stat; ///< Internal statistics accumulator protected: //@cond - typedef cds::details::Allocator< dummy_node_type, typename options::allocator > dummy_node_allocator; - static dummy_node_type * alloc_dummy_node( size_t nHash ) + typedef cds::details::Allocator< aux_node_type, typename traits::allocator > aux_node_allocator; + + aux_node_type * alloc_aux_node( size_t nHash ) { - return dummy_node_allocator().New( nHash ); + m_Stat.onHeadNodeAllocated(); + return aux_node_allocator().New( nHash ); } - static void free_dummy_node( dummy_node_type * p ) + void free_aux_node( aux_node_type * p ) { - dummy_node_allocator().Delete( p ); + aux_node_allocator().Delete( p ); + m_Stat.onHeadNodeFreed(); } /// Calculates hash value of \p key @@ -260,7 +297,7 @@ namespace cds { namespace intrusive { size_t bucket_no( size_t nHash ) const { - return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 ); + return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 ); } static size_t parent_bucket( size_t nBucket ) @@ -269,26 +306,28 @@ namespace cds { namespace intrusive { return nBucket & ~( 1 << bitop::MSBnz( nBucket ) ); } - dummy_node_type * init_bucket( size_t nBucket ) + aux_node_type * init_bucket( size_t nBucket ) { assert( nBucket > 0 ); size_t nParent = parent_bucket( nBucket ); - dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); + aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); 
+ m_Stat.onRecursiveInitBucket(); } assert( pParentBucket != nullptr ); // Allocate a dummy node for new bucket { - dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) ); + aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) ); if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) { m_Buckets.bucket( nBucket, pBucket ); + m_Stat.onNewBucket(); return pBucket; } - free_dummy_node( pBucket ); + free_aux_node( pBucket ); } // Another thread set the bucket. Wait while it done @@ -297,20 +336,22 @@ namespace cds { namespace intrusive { // The compiler can decide that waiting loop can be "optimized" (stripped) // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer. // + m_Stat.onBucketInitContenton(); back_off bkoff; while ( true ) { - dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); + aux_node_type volatile * p = m_Buckets.bucket( nBucket ); if ( p != nullptr ) - return const_cast( p ); + return const_cast( p ); bkoff(); + m_Stat.onBusyWaitBucketInit(); } } - dummy_node_type * get_bucket( size_t nHash ) + aux_node_type * get_bucket( size_t nHash ) { size_t nBucket = bucket_no( nHash ); - dummy_node_type * pHead = m_Buckets.bucket( nBucket ); + aux_node_type * pHead = m_Buckets.bucket( nBucket ); if ( pHead == nullptr ) pHead = init_bucket( nBucket ); @@ -322,13 +363,14 @@ namespace cds { namespace intrusive { void init() { // GC and OrderedList::gc must be the same - static_assert(( std::is_same::value ), "GC and OrderedList::gc must be the same"); + static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); // atomicity::empty_item_counter is not allowed as a item counter - static_assert(( !std::is_same::value ), "atomicity::empty_item_counter is not allowed as a item counter"); + static_assert( !std::is_same::value, + "cds::atomicity::empty_item_counter is not allowed as a item counter"); // Initialize bucket 0 - dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ ); + aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); // insert_aux_node cannot return false for empty list CDS_VERIFY( m_List.insert_aux_node( pNode )); @@ -336,13 +378,31 @@ namespace cds { namespace intrusive { m_Buckets.bucket( 0, pNode ); } - void inc_item_count() + static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) { - size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed); - if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) - { - m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ); + return nBucketCount * nLoadFactor; + } + + void inc_item_count() + { + size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed); + if ( ++m_ItemCounter <= nMaxCount ) + return; + + size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed); + const size_t nBucketCount = static_cast(1) << sz; + if ( nBucketCount < m_Buckets.capacity() ) { + // we may grow the bucket table + const size_t nLoadFactor = m_Buckets.load_factor(); + if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) + return; // someone already have updated m_nBucketCountLog2, so stop here + + m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), + memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, 
memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); } + else + m_nMaxItemCount.store( std::numeric_limits::max(), memory_model::memory_order_relaxed ); } template @@ -350,16 +410,11 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); -# ifdef CDS_CXX11_LAMBDA_SUPPORT - return m_List.find_at( pHead, sv, cmp, - [&f](value_type& item, split_list::details::search_value_type& val){ cds::unref(f)(item, val.val ); }); -# else - split_list::details::find_functor_wrapper ffw( f ); - return m_List.find_at( pHead, sv, cmp, cds::ref(ffw) ); -# endif + return m_Stat.onFind( m_List.find_at( pHead, sv, cmp, + [&f](value_type& item, split_list::details::search_value_type& val){ f(item, val.val ); })); } template @@ -367,21 +422,23 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); - return m_List.find_at( pHead, sv, cmp ); + return m_Stat.onFind( m_List.find_at( pHead, sv, cmp )); } template - value_type * get_( Q const& val, Compare cmp ) + raw_ptr get_( Q const& val, Compare cmp ) { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); - return m_List.get_at( pHead, sv, cmp ); + raw_ptr p = m_List.get_at( pHead, sv, cmp ); + m_Stat.onFind( !!p ); + return p; } template @@ -389,18 +446,23 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); value_type * pNode = m_List.extract_at( pHead, sv, cmp ); - if ( pNode ) + if ( pNode ) { --m_ItemCounter; + m_Stat.onExtractSuccess(); + } + else + m_Stat.onExtractFailed(); return pNode; } template value_type * extract_with_( Q const& val, Less pred ) { + CDS_UNUSED( pred ); return extract_( val, typename wrapped_ordered_list::template make_compare_from_less()); } @@ -409,13 +471,15 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp ) ) { --m_ItemCounter; + m_Stat.onEraseSuccess(); return true; } + m_Stat.onEraseFailed(); return false; } @@ -424,13 +488,15 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp, f )) { --m_ItemCounter; + m_Stat.onEraseSuccess(); return true; } + m_Stat.onEraseFailed(); return false; } @@ -445,6 +511,7 @@ namespace cds { namespace intrusive { */ SplitListSet() : m_nBucketCountLog2(1) + , m_nMaxItemCount( 
max_item_count(2, m_Buckets.load_factor()) ) { init(); } @@ -456,6 +523,7 @@ namespace cds { namespace intrusive { ) : m_Buckets( nItemCount, nLoadFactor ) , m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) ) { init(); } @@ -473,15 +541,17 @@ namespace cds { namespace intrusive { bool insert( value_type& val ) { size_t nHash = hash_value( val ); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); if ( m_List.insert_at( pHead, val )) { inc_item_count(); + m_Stat.onInsertSuccess(); return true; } + m_Stat.onInsertFailed(); return false; } @@ -498,34 +568,38 @@ namespace cds { namespace intrusive { \code void func( value_type& val ); \endcode - where \p val is the item inserted. User-defined functor \p f should guarantee that during changing - \p val no any other changes could be made on this set's item by concurrent threads. - The user-defined functor is called only if the inserting is success and may be passed by reference - using boost::ref + where \p val is the item inserted. The function makes RCU lock internally. - */ + + @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ template bool insert( value_type& val, Func f ) { size_t nHash = hash_value( val ); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); if ( m_List.insert_at( pHead, val, f )) { inc_item_count(); + m_Stat.onInsertSuccess(); return true; } + m_Stat.onInsertFailed(); return false; } - /// Ensures that the \p val exists in the set + /// Updates the node /** The operation performs inserting or changing data with lock-free manner. - If the item \p val is not found in the set, then \p val is inserted into the set. + If the item \p val is not found in the set, then \p val is inserted + iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with item found. The functor signature is: \code @@ -534,35 +608,48 @@ namespace cds { namespace intrusive { with arguments: - \p bNew - \p true if the item has been inserted, \p false otherwise - \p item - item of the set - - \p val - argument \p val passed into the \p ensure function + - \p val - argument \p val passed into the \p update() function If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments - refers to the same thing. - - The functor can change non-key fields of the \p item; however, \p func must guarantee - that during changing no any other modifications could be made on this item by concurrent threads. + refers to the same stuff. - You can pass \p func argument by value or by reference using boost::ref or cds::ref. + The functor may change non-key fields of the \p item. - The function makes RCU lock internally. + The function applies RCU lock internally. - Returns std::pair where \p first is \p true if operation is successfull, + Returns std::pair where \p first is \p true if operation is successful, \p second is \p true if new item has been added or \p false if the item with \p key - already is in the set. + already is in the list. 
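 A possible call pattern for \p update(), assuming the illustrative \p foo_set sketched earlier (the lambda body is illustrative only):
 \code
 foo_set theSet;

 foo* pItem = new foo;
 pItem->nKey   = 42;
 pItem->nValue = 1;

 // Insert if the key is absent (bAllowInsert == true); otherwise the functor
 // is called with the element that is already in the set.
 std::pair<bool, bool> res = theSet.update( *pItem,
     []( bool bNew, foo& itemInSet, foo& arg ) {
         if ( !bNew )
             itemInSet.nValue = arg.nValue;   // change non-key fields only
     },
     true );

 if ( res.first && !res.second ) {
     // The key was already present: *pItem has not been linked into the set,
     // so the caller still owns it.
     delete pItem;
 }
 \endcode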
+ + @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. */ template - std::pair ensure( value_type& val, Func func ) + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) { size_t nHash = hash_value( val ); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); - std::pair bRet = m_List.ensure_at( pHead, val, func ); - if ( bRet.first && bRet.second ) + std::pair bRet = m_List.update_at( pHead, val, func, bAllowInsert ); + if ( bRet.first && bRet.second ) { inc_item_count(); + m_Stat.onUpdateNew(); + } + else + m_Stat.onUpdateExist(); return bRet; } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond /// Unlinks the item \p val from the set /** @@ -581,35 +668,37 @@ namespace cds { namespace intrusive { bool unlink( value_type& val ) { size_t nHash = hash_value( val ); - dummy_node_type * pHead = get_bucket( nHash ); + aux_node_type * pHead = get_bucket( nHash ); assert( pHead != nullptr ); if ( m_List.unlink_at( pHead, val ) ) { --m_ItemCounter; + m_Stat.onEraseSuccess(); return true; } + m_Stat.onEraseFailed(); return false; } /// Deletes the item from the set /** \anchor cds_intrusive_SplitListSet_rcu_erase - The function searches an item with key equal to \p val in the set, + The function searches an item with key equal to \p key in the set, unlinks it from the set, and returns \p true. - If the item with key equal to \p val is not found the function return \p false. + If the item with key equal to \p key is not found the function return \p false. Difference between \ref erase and \p unlink functions: \p erase finds a key and deletes the item found. \p unlink finds an item by key and deletes it - only if \p val is an item of that set, i.e. the pointer to item found - is equal to &val . + only if \p key is an item of that set, i.e. the pointer to item found + is equal to &key . RCU \p synchronize method can be called, therefore, RCU should not be locked. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template - bool erase( Q const& val ) + bool erase( Q const& key ) { - return erase_( val, key_comparator() ); + return erase_( key, key_comparator() ); } /// Deletes the item from the set using \p pred for searching @@ -620,14 +709,15 @@ namespace cds { namespace intrusive { \p pred must imply the same element order as the comparator used for building the set. */ template - bool erase_with( Q const& val, Less pred ) + bool erase_with( Q const& key, Less pred ) { - return erase_( val, typename wrapped_ordered_list::template make_compare_from_less() ); + CDS_UNUSED( pred ); + return erase_( key, typename wrapped_ordered_list::template make_compare_from_less() ); } /// Deletes the item from the set /** \anchor cds_intrusive_SplitListSet_rcu_erase_func - The function searches an item with key equal to \p val in the set, + The function searches an item with key equal to \p key in the set, call \p f functor with item found, unlinks it from the set, and returns \p true. 
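 The difference between \p erase() and \p unlink() in a short sketch (again using the illustrative \p foo_set from above):
 \code
 foo_set theSet;

 foo* pA = new foo;  pA->nKey = 5;
 foo   probe;        probe.nKey = 5;

 theSet.insert( *pA );

 // unlink() succeeds only if the set holds this very object:
 bool r1 = theSet.unlink( probe );   // false: the linked element is *pA, not probe
 // erase() removes by key, whichever object carries it:
 bool r2 = theSet.erase( 5 );        // true: *pA is unlinked; the bucket list's
                                     // disposer is called for it asynchronously
 \endcode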
The \ref disposer specified by \p OrderedList class template parameter is called by garbage collector \p GC asynchronously. @@ -638,18 +728,17 @@ namespace cds { namespace intrusive { void operator()( value_type const& item ); }; \endcode - The functor can be passed by reference with boost:ref - If the item with key equal to \p val is not found the function return \p false. + If the item with key equal to \p key is not found the function return \p false. RCU \p synchronize method can be called, therefore, RCU should not be locked. Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. */ template - bool erase( Q const& val, Func f ) + bool erase( Q const& key, Func f ) { - return erase_( val, key_comparator(), f ); + return erase_( key, key_comparator(), f ); } /// Deletes the item from the set using \p pred for searching @@ -660,22 +749,22 @@ namespace cds { namespace intrusive { \p pred must imply the same element order as the comparator used for building the set. */ template - bool erase_with( Q const& val, Less pred, Func f ) + bool erase_with( Q const& key, Less pred, Func f ) { - return erase_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + CDS_UNUSED( pred ); + return erase_( key, typename wrapped_ordered_list::template make_compare_from_less(), f ); } /// Extracts an item from the set /** \anchor cds_intrusive_SplitListSet_rcu_extract - The function searches an item with key equal to \p val in the set, - unlinks it, and returns pointer to an item found in \p dest argument. - If the item with the key equal to \p val is not found the function returns \p false. + The function searches an item with key equal to \p key in the set, + unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. - @note The function does NOT call RCU read-side lock or synchronization, - and does NOT dispose the item found. It just excludes the item from the set - and returns a pointer to item found. - You should lock RCU before calling of the function, and you should synchronize RCU - outside the RCU lock before reusing returned pointer. + Depends on \p bucket_type you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked + See ordered list implementation for details. \code typedef cds::urcu::gc< general_buffered<> > rcu; @@ -686,16 +775,15 @@ namespace cds { namespace intrusive { // ... rcu_splitlist_set::exempt_ptr p; - { - // first, we should lock RCU - rcu_splitlist_set::rcu_lock lock; - - // Now, you can apply extract function - // Note that you must not delete the item found inside the RCU lock - if ( theList.extract( p, 10 )) { - // do something with p - ... - } + + // For MichaelList we should not lock RCU + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + p = theList.extract( 10 ); + if ( p ) { + // do something with p + ... 
} // We may safely release p here @@ -705,53 +793,40 @@ namespace cds { namespace intrusive { \endcode */ template - bool extract( exempt_ptr& dest, Q const& val ) + exempt_ptr extract( Q const& key ) { - value_type * pNode = extract_( val, key_comparator() ); - if ( pNode ) { - dest = pNode; - return true; - } - return false; + return exempt_ptr(extract_( key, key_comparator() )); } /// Extracts an item from the set using \p pred for searching /** - The function is an analog of \ref cds_intrusive_SplitListSet_rcu_extract "extract(exempt_ptr&, Q const&)" - but \p pred is used for key compare. + The function is an analog of \p extract(Q const&) but \p pred is used for key compare. \p Less functor has the interface like \p std::less. \p pred must imply the same element order as the comparator used for building the set. */ template - bool extract_with( exempt_ptr& dest, Q const& val, Less pred ) + exempt_ptr extract_with( Q const& key, Less pred ) { - value_type * pNode = extract_with_( val, pred ); - if ( pNode ) { - dest = pNode; - return true; - } - return false; + return exempt_ptr( extract_with_( key, pred )); } - /// Finds the key \p val + /// Finds the key \p key /** \anchor cds_intrusive_SplitListSet_rcu_find_func - The function searches the item with key equal to \p val and calls the functor \p f for item found. + The function searches the item with key equal to \p key and calls the functor \p f for item found. The interface of \p Func functor is: \code struct functor { - void operator()( value_type& item, Q& val ); + void operator()( value_type& item, Q& key ); }; \endcode - where \p item is the item found, \p val is the find function argument. - - You can pass \p f argument by value or by reference using boost::ref or cds::ref. + where \p item is the item found, \p key is the find function argument. The functor can change non-key fields of \p item. Note that the functor is only guarantee that \p item cannot be disposed during functor is executing. The functor does not serialize simultaneous access to the set \p item. If such access is possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. - The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor can modify both arguments. Note the hash functor specified for class \p Traits template parameter @@ -759,15 +834,22 @@ namespace cds { namespace intrusive { The function applies RCU lock internally. - The function returns \p true if \p val is found, \p false otherwise. + The function returns \p true if \p key is found, \p false otherwise. */ template - bool find( Q& val, Func f ) + bool find( Q& key, Func f ) { - return find_( val, key_comparator(), f ); + return find_( key, key_comparator(), f ); } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@endcond - /// Finds the key \p val with \p pred predicate for comparing + /// Finds the key \p key with \p pred predicate for comparing /** The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)" but \p cmp is used for key compare. @@ -775,112 +857,99 @@ namespace cds { namespace intrusive { \p cmp must imply the same element order as the comparator used for building the set. 
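 As an illustration, \p extract() and the functor form of \p find() might be used like this for a \p MichaelList-based set (a sketch using the illustrative \p foo_set; details depend on the chosen bucket list):
 \code
 foo_set theSet;   // MichaelList buckets: do not lock RCU around extract()

 foo_set::exempt_ptr xp = theSet.extract( 5 );
 if ( xp ) {
     // The element is excluded from the set but is still alive here,
     // e.g. xp->nKey can be read safely.
     xp.release();   // schedule disposal; the disposer runs after the RCU grace period
 }

 // find() takes the RCU lock internally; the functor runs under that lock
 theSet.find( 5,
     []( foo& item, int key ) {
         // read item fields here; do not keep the reference after the call returns
     });
 \endcode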
*/ template - bool find_with( Q& val, Less pred, Func f ) + bool find_with( Q& key, Less pred, Func f ) { - return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + CDS_UNUSED( pred ); + return find_( key, typename wrapped_ordered_list::template make_compare_from_less(), f ); } - - /// Finds the key \p val - /** \anchor cds_intrusive_SplitListSet_rcu_find_cfunc - The function searches the item with key equal to \p val and calls the functor \p f for item found. - The interface of \p Func functor is: - \code - struct functor { - void operator()( value_type& item, Q const& val ); - }; - \endcode - where \p item is the item found, \p val is the find function argument. - - You can pass \p f argument by value or by reference using boost::ref or cds::ref. - - The functor can change non-key fields of \p item. Note that the functor is only guarantee - that \p item cannot be disposed during functor is executing. - The functor does not serialize simultaneous access to the set \p item. If such access is - possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. - - Note the hash functor specified for class \p Traits template parameter - should accept a parameter of type \p Q that can be not the same as \p value_type. - - The function applies RCU lock internally. - - The function returns \p true if \p val is found, \p false otherwise. - */ - template - bool find( Q const& val, Func f ) + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) { - return find_( val, key_comparator(), f ); + CDS_UNUSED( pred ); + return find_( key, typename wrapped_ordered_list::template make_compare_from_less(), f ); } + //@endcond - /// Finds the key \p val with \p pred predicate for comparing + /// Checks whether the set contains \p key /** - The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_cfunc "find(Q const&, Func)" - but \p cmp is used for key compare. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. */ - template - bool find_with( Q const& val, Less pred, Func f ) + template + bool contains( Q const& key ) { - return find_( val, typename wrapped_ordered_list::template make_compare_from_less(), f ); + return find_value( key, key_comparator() ); } - - /// Finds the key \p val - /** \anchor cds_intrusive_SplitListSet_rcu_find_val - The function searches the item with key equal to \p val - and returns \p true if \p val found or \p false otherwise. - */ + //@cond template - bool find( Q const& val ) + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) { - return find_value( val, key_comparator() ); + return contains( key ); } + //@endcond - /// Finds the key \p val with \p pred predicate for comparing + /// Checks whether the set contains \p key using \p pred predicate for searching /** - The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)" - but \p cmp is used for key compare. - \p Less has the interface like \p std::less. 
- \p pred must imply the same element order as the comparator used for building the set. + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. */ template - bool find_with( Q const& val, Less pred ) + bool contains( Q const& key, Less pred ) { - return find_value( val, typename wrapped_ordered_list::template make_compare_from_less() ); + CDS_UNUSED( pred ); + return find_value( key, typename wrapped_ordered_list::template make_compare_from_less() ); } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond - /// Finds the key \p val and return the item found + /// Finds the key \p key and return the item found /** \anchor cds_intrusive_SplitListSet_rcu_get - The function searches the item with key equal to \p val and returns the pointer to item found. - If \p val is not found it returns \p nullptr. + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. RCU should be locked before call of this function. Returned item is valid only while RCU is locked: \code - cds::intrusive::SplitListSet< your_template_parameters > theSet; + typedef cds::intrusive::SplitListSet< your_template_parameters > set_class; + set_class theSet; // ... + typename set_class::raw_ptr rp; { // Lock RCU hash_set::rcu_lock lock; - foo * pVal = theSet.get( 5 ); - if ( pVal ) { - // Deal with pVal + rp = theSet.get( 5 ); + if ( rp ) { + // Deal with rp //... } // Unlock RCU by rcu_lock destructor - // pVal can be retired by disposer at any time after RCU has been unlocked + // rp can be retired by disposer at any time after RCU has been unlocked } \endcode */ template - value_type * get( Q const& val ) + raw_ptr get( Q const& key ) { - return get_( val, key_comparator() ); + return get_( key, key_comparator() ); } - /// Finds the key \p val and return the item found + /// Finds the key \p key and return the item found /** The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)" but \p pred is used for comparing the keys. @@ -890,9 +959,10 @@ namespace cds { namespace intrusive { \p pred must imply the same element order as the comparator used for building the set. */ template - value_type * get_with( Q const& val, Less pred ) + raw_ptr get_with( Q const& key, Less pred ) { - return get_( val, typename wrapped_ordered_list::template make_compare_from_less()); + CDS_UNUSED( pred ); + return get_( key, typename wrapped_ordered_list::template make_compare_from_less()); } @@ -912,11 +982,7 @@ namespace cds { namespace intrusive { return size() == 0; } - /// Clears the set (non-atomic) - /** - The function unlink all items from the set. - The function is not atomic. Therefore, \p clear may be used only for debugging purposes. 
- */ + /// Clears the set (not atomic) void clear() { iterator it = begin(); @@ -928,6 +994,12 @@ namespace cds { namespace intrusive { } } + /// Returns internal statistics + stat const& statistics() const + { + return m_Stat; + } + protected: //@cond template @@ -951,20 +1023,21 @@ namespace cds { namespace intrusive { {} }; //@endcond + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ /// Forward iterator /** The forward iterator for a split-list has some features: - it has no post-increment operator - it depends on iterator of underlying \p OrderedList - - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data. - - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent - deleting operations it is no guarantee that you iterate all item in the split-list. - Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container - for debug purpose only. + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. */ typedef iterator_type iterator; + /// Const forward iterator /** For iterator's features and requirements see \ref iterator @@ -995,17 +1068,27 @@ namespace cds { namespace intrusive { /// Returns a forward const iterator addressing the first element in a split-list const_iterator begin() const { - return const_iterator( m_List.begin(), m_List.end() ); + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator cbegin() const + { + return const_iterator( m_List.cbegin(), m_List.cend() ); } /// Returns an const iterator that addresses the location succeeding the last element in a split-list const_iterator end() const { - return const_iterator( m_List.end(), m_List.end() ); + return cend(); } - + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator cend() const + { + return const_iterator( m_List.cend(), m_List.cend() ); + } + //@} }; }} // namespace cds::intrusive -#endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H +#endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
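 To round things off, a sketch of read-side iteration, \p size(), and the new \p statistics() accessor, again using the illustrative \p foo_set (with the default traits the statistics type is a no-op; configure a real statistics type, e.g. \p split_list::stat, via \p Traits to obtain meaningful counters):
 \code
 foo_set theSet;

 {
     // Iterators are safe only while the RCU lock is held
     foo_set::rcu_lock lock;
     for ( foo_set::const_iterator it = theSet.cbegin(), itEnd = theSet.cend(); it != itEnd; ++it ) {
         // read-only traversal; items erased concurrently may be skipped
     }
 }

 size_t n = theSet.size();   // item_counter-based, hence the static_assert in init()

 foo_set::stat const& s = theSet.statistics();
 // inspect s, or ignore it when the default (empty) statistics type is used
 \endcode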