X-Git-Url: http://plrg.eecs.uci.edu/git/?p=libcds.git;a=blobdiff_plain;f=cds%2Fintrusive%2Fsplit_list_rcu.h;h=8ea2078d97e53085d4f3061f758f623ee66f98f9;hp=384b2d6db2f8442ca472b25480712bcac02743cb;hb=df91b83e7b3e4048b3ea6584cb78a7e7531224d6;hpb=9e2654f9e05fa1ed4cbb78387363d735d385b74c

diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h
index 384b2d6d..8ea2078d 100644
--- a/cds/intrusive/split_list_rcu.h
+++ b/cds/intrusive/split_list_rcu.h
@@ -1,10 +1,41 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/

 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H

+#include <limits>
+
 #include <cds/intrusive/details/split_list_base.h>
 #include <cds/details/binary_functor_wrapper.h>
+#include <cds/details/type_padding.h>

 namespace cds { namespace intrusive {
@@ -30,7 +61,7 @@ namespace cds { namespace intrusive {
         - \p Traits - set traits, default is \p split_list::traits.
           Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.

-        @note About reqired features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
+        @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".

         \par How to use
         Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include the appropriate RCU header file,
         see \ref cds_urcu_gc "RCU type selection".
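(Editorial sketch, not part of the patch.) To make the wiring described above concrete, here is a minimal example of how such a set is typically assembled over an RCU-based MichaelList. The item type Foo and the FooCmp, FooHash and FooDisposer functors are hypothetical placeholders:

    #include <functional>
    #include <cds/urcu/general_buffered.h>        // RCU implementation, included first
    #include <cds/intrusive/michael_list_rcu.h>   // ordered list used for buckets
    #include <cds/intrusive/split_list_rcu.h>

    typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu;

    // Item type: hooks into the bucket list via the michael_list node
    struct Foo: public cds::intrusive::michael_list::node< rcu >
    {
        int nKey;       // key
        int nPayload;   // non-key data
    };

    struct FooCmp {     // three-way comparator over the key
        int operator()( Foo const& l, Foo const& r ) const
        { return l.nKey < r.nKey ? -1 : l.nKey > r.nKey ? 1 : 0; }
    };
    struct FooHash {    // hash functor; accepts both the item and a bare key
        size_t operator()( Foo const& v ) const { return std::hash<int>()( v.nKey ); }
        size_t operator()( int k ) const        { return std::hash<int>()( k ); }
    };
    struct FooDisposer { void operator()( Foo * p ) const { delete p; } };

    typedef cds::intrusive::MichaelList< rcu, Foo,
        typename cds::intrusive::michael_list::make_traits<
            cds::intrusive::opt::hook<
                cds::intrusive::michael_list::base_hook< cds::opt::gc< rcu > > >
            , cds::opt::compare< FooCmp >
            , cds::intrusive::opt::disposer< FooDisposer >
        >::type
    > bucket_list;

    typedef cds::intrusive::SplitListSet< rcu, bucket_list,
        typename cds::intrusive::split_list::make_traits<
            cds::opt::hash< FooHash >
        >::type
    > set_type;

The later sketches below reuse these hypothetical types.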
@@ -84,6 +115,7 @@ namespace cds { namespace intrusive {
         typedef typename ordered_list::disposer   disposer;   ///< Node disposer functor
         typedef typename ordered_list::rcu_lock   rcu_lock;   ///< RCU scoped lock
         typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
+        typedef typename ordered_list::raw_ptr    raw_ptr;    ///< pointer to the node for \p get() function

         /// Group of \p extract_xxx functions requires external locking if the underlying ordered list requires that
         static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;

@@ -92,10 +124,17 @@ namespace cds { namespace intrusive {
         typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
         typedef typename traits::stat         stat;         ///< Internal statistics

+        // GC and OrderedList::gc must be the same
+        static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+        // atomicity::empty_item_counter is not allowed as an item counter
+        static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
+            "cds::atomicity::empty_item_counter is not allowed as an item counter");
+
     protected:
         typedef typename ordered_list::node_type  list_node_type; ///< Node type as declared in ordered list
         typedef split_list::node< list_node_type > node_type;     ///< split-list node type
-        typedef node_type                         dummy_node_type; ///< dummy node type
+        typedef node_type                         aux_node_type;   ///< auxiliary (dummy) node type

         /// Split-list node traits
         /**
@@ -109,7 +148,7 @@ namespace cds { namespace intrusive {
         typedef typename split_list::details::bucket_table_selector<
             traits::dynamic_bucket_table
             , gc
-            , dummy_node_type
+            , aux_node_type
             , opt::allocator< typename traits::allocator >
             , opt::memory_model< memory_model >
         >::type bucket_table;

@@ -122,10 +161,10 @@ namespace cds { namespace intrusive {
         class ordered_list_wrapper: public ordered_list
         {
             typedef ordered_list base_class;
-            typedef typename base_class::auxiliary_head       bucket_head_type;
+            typedef typename base_class::auxiliary_head bucket_head_type;

         public:
-            bool insert_at( dummy_node_type * pHead, value_type& val )
+            bool insert_at( aux_node_type * pHead, value_type& val )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -133,7 +172,7 @@ namespace cds { namespace intrusive {
             }

             template <typename Func>
-            bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
+            bool insert_at( aux_node_type * pHead, value_type& val, Func f )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -141,14 +180,14 @@ namespace cds { namespace intrusive {
             }

             template <typename Func>
-            std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
+            std::pair<bool, bool> update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
-                return base_class::ensure_at( h, val, func );
+                return base_class::update_at( h, val, func, bAllowInsert );
             }

-            bool unlink_at( dummy_node_type * pHead, value_type& val )
+            bool unlink_at( aux_node_type * pHead, value_type& val )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -156,7 +195,7 @@ namespace cds { namespace intrusive {
             }

             template <typename Q, typename Compare, typename Func>
-            bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
+            bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -164,7 +203,7 @@ namespace cds { namespace intrusive {
             }

             template <typename Q, typename Compare>
-            bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
+            bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -172,7 +211,7 @@ namespace cds { namespace intrusive {
             }

             template <typename Q, typename Compare>
-            value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
+            value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -180,7 +219,7 @@ namespace cds { namespace intrusive {
             }

             template <typename Q, typename Compare, typename Func>
-            bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f ) const
+            bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -188,7 +227,7 @@ namespace cds { namespace intrusive {
             }

             template <typename Q, typename Compare>
-            bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp ) const
+            bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
@@ -196,18 +235,18 @@ namespace cds { namespace intrusive {
             }

             template <typename Q, typename Compare>
-            value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp ) const
+            raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
             {
                 assert( pHead != nullptr );
                 bucket_head_type h(pHead);
                 return base_class::get_at( h, val, cmp );
             }

-            bool insert_aux_node( dummy_node_type * pNode )
+            bool insert_aux_node( aux_node_type * pNode )
             {
                 return base_class::insert_aux_node( pNode );
             }
-            bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
+            bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode )
             {
                 bucket_head_type h(pHead);
                 return base_class::insert_aux_node( h, pNode );
@@ -233,225 +272,6 @@ namespace cds { namespace intrusive {
         };
         //@endcond

-    protected:
-        ordered_list_wrapper    m_List;             ///< Ordered list containing split-list items
-        bucket_table            m_Buckets;          ///< bucket table
-        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
-        item_counter            m_ItemCounter;      ///< Item counter
-        hash                    m_HashFunctor;      ///< Hash functor
-        stat                    m_Stat;             ///< Internal stattistics accumulator
-
-    protected:
-        //@cond
-        typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
-
-        dummy_node_type * alloc_dummy_node( size_t nHash )
-        {
-            m_Stat.onHeadNodeAllocated();
-            return dummy_node_allocator().New( nHash );
-        }
-        void free_dummy_node( dummy_node_type * p )
-        {
-            dummy_node_allocator().Delete( p );
-            m_Stat.onHeadNodeFreed();
-        }
-
-        /// Calculates hash value of \p key
-        template <typename Q>
-        size_t hash_value( Q const& key ) const
-        {
-            return m_HashFunctor( key );
-        }
-
-        size_t bucket_no( size_t nHash ) const
-        {
-            return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
-        }
-
-        static size_t parent_bucket( size_t nBucket )
-        {
-            assert( nBucket > 0 );
-            return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
-        }
-
-        dummy_node_type * init_bucket( size_t nBucket )
-        {
-            assert( nBucket > 0 );
-            size_t nParent = parent_bucket( nBucket );
-
-            dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
-            if ( pParentBucket == nullptr ) {
-                pParentBucket = init_bucket( nParent );
-                m_Stat.onRecursiveInitBucket();
-            }
-
-            assert( pParentBucket != nullptr );
-
-            // Allocate a dummy node for new bucket
-            {
-                dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
-                if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
-                    m_Buckets.bucket( nBucket, pBucket );
-                    m_Stat.onNewBucket();
-                    return pBucket;
-                }
-                free_dummy_node( pBucket );
-            }
-
-            // Another thread set the bucket. Wait while it done
-
-            // In this point, we must wait while nBucket is empty.
-            // The compiler can decide that waiting loop can be "optimized" (stripped)
-            // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
-            //
-            m_Stat.onBucketInitContenton();
-            back_off bkoff;
-            while ( true ) {
-                dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
-                if ( p != nullptr )
-                    return const_cast<dummy_node_type *>( p );
-                bkoff();
-                m_Stat.onBusyWaitBucketInit();
-            }
-        }
-
-        dummy_node_type * get_bucket( size_t nHash )
-        {
-            size_t nBucket = bucket_no( nHash );
-
-            dummy_node_type * pHead = m_Buckets.bucket( nBucket );
-            if ( pHead == nullptr )
-                pHead = init_bucket( nBucket );
-
-            assert( pHead->is_dummy() );
-
-            return pHead;
-        }
-
-        void init()
-        {
-            // GC and OrderedList::gc must be the same
-            static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
-
-            // atomicity::empty_item_counter is not allowed as a item counter
-            static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
-                "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
-            // Initialize bucket 0
-            dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
-
-            // insert_aux_node cannot return false for empty list
-            CDS_VERIFY( m_List.insert_aux_node( pNode ));
-
-            m_Buckets.bucket( 0, pNode );
-        }
-
-        void inc_item_count()
-        {
-            size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
-            if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
-            {
-                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
-            }
-        }
-
-        template <typename Q, typename Compare, typename Func>
-        bool find_( Q& val, Compare cmp, Func f )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
-                [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
-        }
-
-        template <typename Q, typename Compare>
-        bool find_value( Q const& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
-        }
-
-        template <typename Q, typename Compare>
-        value_type * get_( Q const& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            value_type * p = m_List.get_at( pHead, sv, cmp );
-            m_Stat.onFind( p != nullptr );
-            return p;
-        }
-
-        template <typename Q, typename Compare>
-        value_type * extract_( Q const& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            value_type * pNode = m_List.extract_at( pHead, sv, cmp );
-            if ( pNode ) {
-                --m_ItemCounter;
-                m_Stat.onExtractSuccess();
-            }
-            else
-                m_Stat.onExtractFailed();
-            return pNode;
-        }
-
-        template <typename Q, typename Less>
-        value_type * extract_with_( Q const& val, Less pred )
-        {
-            CDS_UNUSED( pred );
-            return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
-        }
-
-        template <typename Q, typename Compare>
-        bool erase_( const Q& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            if ( m_List.erase_at( pHead, sv, cmp ) ) {
-                --m_ItemCounter;
-                m_Stat.onEraseSuccess();
-                return true;
-            }
-            m_Stat.onEraseFailed();
-            return false;
-        }
-
-        template <typename Q, typename Compare, typename Func>
-        bool erase_( Q const& val, Compare cmp, Func f )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            if ( m_List.erase_at( pHead, sv, cmp, f )) {
-                --m_ItemCounter;
-                m_Stat.onEraseSuccess();
-                return true;
-            }
-            m_Stat.onEraseFailed();
-            return false;
-        }
-
-        //@endcond
-
     public:
         /// Initialize split-ordered list of default capacity
         /**
@@ -461,6 +281,7 @@ namespace cds { namespace intrusive {
         */
         SplitListSet()
             : m_nBucketCountLog2(1)
+            , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
        {
             init();
        }
@@ -472,6 +293,7 @@ namespace cds { namespace intrusive {
             )
             : m_Buckets( nItemCount, nLoadFactor )
             , m_nBucketCountLog2(1)
+            , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
        {
             init();
        }
@@ -489,7 +311,7 @@ namespace cds { namespace intrusive {
         bool insert( value_type& val )
         {
             size_t nHash = hash_value( val );
-            dummy_node_type * pHead = get_bucket( nHash );
+            aux_node_type * pHead = get_bucket( nHash );
             assert( pHead != nullptr );

             node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
@@ -528,7 +350,7 @@ namespace cds { namespace intrusive {
         bool insert( value_type& val, Func f )
         {
             size_t nHash = hash_value( val );
-            dummy_node_type * pHead = get_bucket( nHash );
+            aux_node_type * pHead = get_bucket( nHash );
             assert( pHead != nullptr );

             node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
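(Editorial sketch, not part of the patch.) A typical call to insert( val, f ), reusing the hypothetical set_type/Foo from the first sketch; the functor runs only when the node has actually been linked into the set:

    set_type theSet;

    Foo * p = new Foo;
    p->nKey = 10;

    // The functor may initialize non-key fields of the inserted item
    if ( !theSet.insert( *p, []( Foo& item ) { item.nPayload = 42; } ))
        delete p;   // the key was already present, so *p was never linked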
@@ -542,11 +364,12 @@ namespace cds { namespace intrusive {
             return false;
         }

-        /// Ensures that the \p val exists in the set
+        /// Updates the node
         /**
             The operation performs inserting or changing data in a lock-free manner.

-            If the item \p val is not found in the set, then \p val is inserted into the set.
+            If the item \p val is not found in the set, then \p val is inserted
+            iff \p bAllowInsert is \p true.
             Otherwise, the functor \p func is called with item found.
             The functor signature is:
             \code
                 void func( bool bNew, value_type& item, value_type& val );
             \endcode
@@ -555,38 +378,48 @@ namespace cds { namespace intrusive {
             with arguments:
             - \p bNew - \p true if the item has been inserted, \p false otherwise
             - \p item - item of the set
-            - \p val - argument \p val passed into the \p ensure function
+            - \p val - argument \p val passed into the \p update() function
             If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
-            refers to the same thing.
+            refer to the same thing.

-            The function makes RCU lock internally.
+            The functor may change non-key fields of the \p item.
+
+            The function applies RCU lock internally.

-            Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
+            Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
             \p second is \p true if new item has been added or \p false if the item with \p key
-            already is in the set.
+            already is in the list.

             @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
             \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level synchronization.
         */
         template <typename Func>
-        std::pair<bool, bool> ensure( value_type& val, Func func )
+        std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
         {
             size_t nHash = hash_value( val );
-            dummy_node_type * pHead = get_bucket( nHash );
+            aux_node_type * pHead = get_bucket( nHash );
             assert( pHead != nullptr );

             node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );

-            std::pair<bool, bool> bRet = m_List.ensure_at( pHead, val, func );
+            std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert );
             if ( bRet.first && bRet.second ) {
                 inc_item_count();
-                m_Stat.onEnsureNew();
+                m_Stat.onUpdateNew();
             }
             else
-                m_Stat.onEnsureExist();
+                m_Stat.onUpdateExist();
             return bRet;
         }
+        //@cond
+        template <typename Func>
+        CDS_DEPRECATED("ensure() is deprecated, use update()")
+        std::pair<bool, bool> ensure( value_type& val, Func func )
+        {
+            return update( val, func, true );
+        }
+        //@endcond
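(Editorial sketch, not part of the patch.) A typical update() call against the hypothetical set_type/Foo from the first sketch:

    Foo * p = new Foo;
    p->nKey = 10;
    p->nPayload = 7;

    std::pair<bool, bool> res = theSet.update( *p,
        []( bool bNew, Foo& item, Foo& val ) {
            if ( !bNew )
                item.nPayload = val.nPayload;   // merge data into the existing item
        },
        true    // bAllowInsert: insert if the key is absent
    );
    // res.first  - the operation succeeded
    // res.second - *p itself has been linked into the set
    if ( res.first && !res.second )
        delete p;   // an equal item already existed; *p was not linked

Note that, as with insert(), the node is only safe to dispose manually when it was never linked into the set.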
         /// Unlinks the item \p val from the set
         /**
             The function searches the item \p val in the set and unlinks it from the set
             if it is found and is equal to \p val.

             RCU \p synchronize method can be called. RCU should not be locked.

             The function returns \p true if success and \p false otherwise.
         */
@@ -605,7 +438,7 @@ namespace cds { namespace intrusive {
         bool unlink( value_type& val )
         {
             size_t nHash = hash_value( val );
-            dummy_node_type * pHead = get_bucket( nHash );
+            aux_node_type * pHead = get_bucket( nHash );
             assert( pHead != nullptr );

             if ( m_List.unlink_at( pHead, val ) ) {
@@ -698,11 +531,10 @@ namespace cds { namespace intrusive {
             unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
             If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.

-            @note The function does NOT call RCU read-side lock or synchronization,
-            and does NOT dispose the item found. It just excludes the item from the set
-            and returns a pointer to item found.
-            You should lock RCU before calling of the function, and you should synchronize RCU
-            outside the RCU lock before reusing returned pointer.
+            Depending on \p bucket_type you may or may not need to lock RCU before calling this function:
+            - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
+            - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
+            See the ordered list implementation for details.

             \code
             typedef cds::urcu::gc< general_buffered<> > rcu;
             typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
             typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;

             rcu_splitlist_set theList;
             // ...

             rcu_splitlist_set::exempt_ptr p;
-            {
-                // first, we should lock RCU
-                rcu_splitlist_set::rcu_lock lock;
-
-                // Now, you can apply extract function
-                // Note that you must not delete the item found inside the RCU lock
-                p = theList.extract( 10 );
-                if ( p ) {
-                    // do something with p
-                    ...
-                }
+
+            // For MichaelList we should not lock RCU
+
+            // Now, you can apply the extract function.
+            // Note that you must not delete the item found inside the RCU lock.
+            p = theList.extract( 10 );
+            if ( p ) {
+                // do something with p
+                ...
             }

             // We may safely release p here
@@ -740,8 +570,7 @@ namespace cds { namespace intrusive {

         /// Extracts an item from the set using \p pred for searching
         /**
-            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
-            but \p pred is used for key compare.
+            The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
             \p Less functor has the interface like \p std::less.
             \p pred must imply the same element order as the comparator used for building the set.
         */
@@ -812,30 +641,49 @@ namespace cds { namespace intrusive {
         }
         //@endcond

-        /// Finds the key \p key
-        /** \anchor cds_intrusive_SplitListSet_rcu_find_val
+        /// Checks whether the set contains \p key
+        /**
             The function searches the item with key equal to \p key
-            and returns \p true if \p key found or \p false otherwise.
+            and returns \p true if it is found, and \p false otherwise.
+
+            Note that the hash functor specified for the \p Traits template parameter
+            should accept a parameter of type \p Q that is not necessarily the same as \p value_type.
+            Otherwise, you may use the \p contains( Q const&, Less pred ) overload with an explicit predicate for key comparison.
         */
         template <typename Q>
-        bool find( Q const& key )
+        bool contains( Q const& key )
         {
             return find_value( key, key_comparator() );
         }
+        //@cond
+        template <typename Q>
+        CDS_DEPRECATED("deprecated, use contains()")
+        bool find( Q const& key )
+        {
+            return contains( key );
+        }
+        //@endcond

-        /// Finds the key \p key with \p pred predicate for comparing
+        /// Checks whether the set contains \p key using \p pred predicate for searching
         /**
-            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)"
-            but \p cmp is used for key compare.
-            \p Less has the interface like \p std::less.
-            \p pred must imply the same element order as the comparator used for building the set.
+            The function is an analog of \p contains( key ) but \p pred is used for key comparison.
+            \p Less functor has the interface like \p std::less.
+            \p Less must imply the same element order as the comparator used for building the list.
         */
         template <typename Q, typename Less>
-        bool find_with( Q const& key, Less pred )
+        bool contains( Q const& key, Less pred )
         {
             CDS_UNUSED( pred );
             return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
         }
+        //@cond
+        template <typename Q, typename Less>
+        CDS_DEPRECATED("deprecated, use contains()")
+        bool find_with( Q const& key, Less pred )
+        {
+            return contains( key, pred );
+        }
+        //@endcond
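(Editorial sketch, not part of the patch.) Looking an item up by a lightweight key with an explicit predicate; LessFoo is hypothetical and must induce the same order as the set's comparator (FooCmp in the first sketch):

    struct LessFoo {
        bool operator()( Foo const& l, int r ) const        { return l.nKey < r; }
        bool operator()( int l, Foo const& r ) const        { return l < r.nKey; }
        bool operator()( Foo const& l, Foo const& r ) const { return l.nKey < r.nKey; }
    };

    bool bFound = theSet.contains( 10, LessFoo() );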
         /// Finds the key \p key and returns the item found
         /** \anchor cds_intrusive_SplitListSet_rcu_get
             The function searches the item with key equal to \p key and returns the pointer to the item found.
             If \p key is not found it returns \p nullptr.

             RCU should be locked before call of this function.
             Returned item is valid only while RCU is locked:
             \code
-            cds::intrusive::SplitListSet< your_template_parameters > theSet;
+            typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
+            set_class theSet;
             // ...
+            typename set_class::raw_ptr rp;
             {
                 // Lock RCU
                 hash_set::rcu_lock lock;

-                foo * pVal = theSet.get( 5 );
-                if ( pVal ) {
-                    // Deal with pVal
+                rp = theSet.get( 5 );
+                if ( rp ) {
+                    // Deal with rp
                     //...
                 }

                 // Unlock RCU by rcu_lock destructor
-                // pVal can be retired by disposer at any time after RCU has been unlocked
+                // rp can be retired by disposer at any time after RCU has been unlocked
             }
             \endcode
         */
         template <typename Q>
-        value_type * get( Q const& key )
+        raw_ptr get( Q const& key )
         {
             return get_( key, key_comparator() );
         }

         /// Finds the key \p key with \p pred predicate and returns the item found
         /**
             The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
             but \p pred is used for comparing the keys.

             \p Less functor has the interface like \p std::less.
             \p pred must imply the same element order as the comparator used for building the set.
         */
         template <typename Q, typename Less>
-        value_type * get_with( Q const& key, Less pred )
+        raw_ptr get_with( Q const& key, Less pred )
         {
             CDS_UNUSED( pred );
             return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
         }
@@ -943,20 +793,21 @@ namespace cds { namespace intrusive {
             {}
         };
         //@endcond

+    public:
+    ///@name Forward iterators (thread-safe under RCU lock)
+    //@{
         /// Forward iterator
         /**
             The forward iterator for a split-list has some features:
             - it has no post-increment operator
             - it depends on iterator of underlying \p OrderedList

-            - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data.
-            - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent
-              deleting operations it is no guarantee that you iterate all item in the split-list.
-              Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container
-              for debug purpose only.
+            You may safely use iterators in multi-threaded environment only under RCU lock.
+            Otherwise, a crash is possible if another thread deletes the element the iterator points to.
         */
         typedef iterator_type<false> iterator;
+
         /// Const forward iterator
         /**
             For iterator's features and requirements see \ref iterator
         */
         typedef iterator_type<true> const_iterator;
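(Editorial sketch, not part of the patch.) Iterating safely under RCU lock, again with the hypothetical set_type from the first sketch:

    {
        // RCU critical section: items seen by the iterator cannot be
        // physically reclaimed while the lock is held
        set_type::rcu_lock lock;

        for ( set_type::iterator it = theSet.begin(); it != theSet.end(); ++it ) {
            int k = it->nKey;   // dereferencing is safe while RCU is locked
            // do not call RCU synchronization or blocking operations here
        }
    }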
@@ -1005,7 +856,245 @@ namespace cds { namespace intrusive {
         {
             return const_iterator( m_List.cend(), m_List.cend() );
         }
+    //@}
+
+    protected:
+        //@cond
+        aux_node_type * alloc_aux_node( size_t nHash )
+        {
+            m_Stat.onHeadNodeAllocated();
+            aux_node_type * p = m_Buckets.alloc_aux_node();
+            if ( p )
+                p->m_nHash = nHash;
+            return p;
+        }
+
+        void free_aux_node( aux_node_type * p )
+        {
+            m_Buckets.free_aux_node( p );
+            m_Stat.onHeadNodeFreed();
+        }
+
+        /// Calculates hash value of \p key
+        template <typename Q>
+        size_t hash_value( Q const& key ) const
+        {
+            return m_HashFunctor( key );
+        }
+
+        size_t bucket_no( size_t nHash ) const
+        {
+            return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
+        }
+
+        static size_t parent_bucket( size_t nBucket )
+        {
+            assert( nBucket > 0 );
+            return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
+        }
+
+        aux_node_type * init_bucket( size_t nBucket )
+        {
+            assert( nBucket > 0 );
+            size_t nParent = parent_bucket( nBucket );
+
+            aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
+            if ( pParentBucket == nullptr ) {
+                pParentBucket = init_bucket( nParent );
+                m_Stat.onRecursiveInitBucket();
+            }
+
+            assert( pParentBucket != nullptr );
+
+            // Allocate a dummy node for the new bucket
+            {
+                aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
+                if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
+                    m_Buckets.bucket( nBucket, pBucket );
+                    m_Stat.onNewBucket();
+                    return pBucket;
+                }
+                free_aux_node( pBucket );
+            }
+
+            // Another thread has set the bucket; wait until it is done.
+            // At this point we must wait while the nBucket entry is empty.
+            // The compiler may decide that the waiting loop can be "optimized" (stripped),
+            // so we wait on a volatile bucket head pointer to prevent that.
+            m_Stat.onBucketInitContenton();
+            back_off bkoff;
+            while ( true ) {
+                aux_node_type volatile * p = m_Buckets.bucket( nBucket );
+                if ( p != nullptr )
+                    return const_cast<aux_node_type *>( p );
+                bkoff();
+                m_Stat.onBusyWaitBucketInit();
+            }
+        }
+
+        aux_node_type * get_bucket( size_t nHash )
+        {
+            size_t nBucket = bucket_no( nHash );
+
+            aux_node_type * pHead = m_Buckets.bucket( nBucket );
+            if ( pHead == nullptr )
+                pHead = init_bucket( nBucket );
+
+            assert( pHead->is_dummy() );
+
+            return pHead;
+        }
+
+        void init()
+        {
+            // Initialize bucket 0
+            aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
+
+            // insert_aux_node cannot return false for empty list
+            CDS_VERIFY( m_List.insert_aux_node( pNode ));
+
+            m_Buckets.bucket( 0, pNode );
+        }
+
+        static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
+        {
+            return nBucketCount * nLoadFactor;
+        }
+
+        void inc_item_count()
+        {
+            size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
+            if ( ++m_ItemCounter <= nMaxCount )
+                return;
+
+            size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
+            const size_t nBucketCount = static_cast<size_t>(1) << sz;
+            if ( nBucketCount < m_Buckets.capacity() ) {
+                // we may grow the bucket table
+                const size_t nLoadFactor = m_Buckets.load_factor();
+                if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
+                    return; // someone has already updated m_nBucketCountLog2, so stop here
+
+                m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
+                    memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+            }
+            else
+                m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
+        }
+
+        template <typename Q, typename Compare, typename Func>
+        bool find_( Q& val, Compare cmp, Func f )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
+                [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
+        }
+
+        template <typename Q, typename Compare>
+        bool find_value( Q const& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
+        }
+
+        template <typename Q, typename Compare>
+        raw_ptr get_( Q const& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            raw_ptr p = m_List.get_at( pHead, sv, cmp );
+            m_Stat.onFind( !!p );
+            return p;
+        }
+
+        template <typename Q, typename Compare>
+        value_type * extract_( Q const& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            value_type * pNode = m_List.extract_at( pHead, sv, cmp );
+            if ( pNode ) {
+                --m_ItemCounter;
+                m_Stat.onExtractSuccess();
+            }
+            else
+                m_Stat.onExtractFailed();
+            return pNode;
+        }
+
+        template <typename Q, typename Less>
+        value_type * extract_with_( Q const& val, Less pred )
+        {
+            CDS_UNUSED( pred );
+            return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
+        }
+
+        template <typename Q, typename Compare>
+        bool erase_( const Q& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            if ( m_List.erase_at( pHead, sv, cmp ) ) {
+                --m_ItemCounter;
+                m_Stat.onEraseSuccess();
+                return true;
+            }
+            m_Stat.onEraseFailed();
+            return false;
+        }
+
+        template <typename Q, typename Compare, typename Func>
+        bool erase_( Q const& val, Compare cmp, Func f )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            if ( m_List.erase_at( pHead, sv, cmp, f )) {
+                --m_ItemCounter;
+                m_Stat.onEraseSuccess();
+                return true;
+            }
+            m_Stat.onEraseFailed();
+            return false;
+        }
+        //@endcond
+
+    protected:
+        //@cond
+        typedef typename cds::details::type_padding< bucket_table, traits::padding >::type padded_bucket_table;
+        padded_bucket_table     m_Buckets;          ///< bucket table
+
+        typedef typename cds::details::type_padding< ordered_list_wrapper, traits::padding >::type padded_ordered_list;
+        padded_ordered_list     m_List;             ///< Ordered list containing split-list items
+
+        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
+        atomics::atomic<size_t> m_nMaxItemCount;    ///< number of items the container can hold before we have to resize
+        item_counter            m_ItemCounter;      ///< Item counter
+        hash                    m_HashFunctor;      ///< Hash functor
+        stat                    m_Stat;             ///< Internal statistics accumulator
+        //@endcond
     };
 }}  // namespace cds::intrusive
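(Editorial sketch, not part of the patch.) The bucket arithmetic above in isolation: parent_bucket() clears the most significant set bit, so a new bucket is spliced into the ordered list right after its parent, and inc_item_count() doubles the table once the item count exceeds bucket_count * load_factor. A standalone illustration:

    #include <cassert>
    #include <cstddef>

    // Portable stand-in for the bitop::MSBnz-based parent_bucket()
    size_t parent_bucket( size_t nBucket )
    {
        size_t pos = 0;
        for ( size_t v = nBucket; v >>= 1; ) ++pos;     // index of the highest set bit
        return nBucket & ~( static_cast<size_t>(1) << pos );
    }

    int main()
    {
        assert( parent_bucket( 1 ) == 0 );  // 0b001 -> 0b000
        assert( parent_bucket( 5 ) == 1 );  // 0b101 -> 0b001
        assert( parent_bucket( 6 ) == 2 );  // 0b110 -> 0b010

        // Growth trigger: with 8 buckets and load factor 2, the bucket count
        // is doubled after the 16th item (8 * 2) is inserted.
        size_t nBucketCount = 8, nLoadFactor = 2;
        assert( nBucketCount * nLoadFactor == 16 );
        return 0;
    }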