From 8277dbfa3fae52996a2c50499cde16a10f9c188c Mon Sep 17 00:00:00 2001 From: khizmax Date: Tue, 20 Dec 2016 21:22:18 +0300 Subject: [PATCH] SkipList: fixed erase() and find_fastpath() bugs --- cds/intrusive/details/skip_list_base.h | 21 +- cds/intrusive/impl/skip_list.h | 117 +- cds/intrusive/skip_list_rcu.h | 2118 ++++++++++----------- test/include/cds_test/stat_skiplist_out.h | 6 +- 4 files changed, 1152 insertions(+), 1110 deletions(-) diff --git a/cds/intrusive/details/skip_list_base.h b/cds/intrusive/details/skip_list_base.h index 42772bbb..dbc3a272 100644 --- a/cds/intrusive/details/skip_list_base.h +++ b/cds/intrusive/details/skip_list_base.h @@ -70,6 +70,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) unsigned int m_nHeight; ///< Node height (size of \p m_arrNext array). For node at level 0 the height is 1. atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr + atomics::atomic m_nUnlink; ///< How many levels has been unlinked //@endcond public: @@ -77,6 +78,7 @@ namespace cds { namespace intrusive { : m_pNext( nullptr ) , m_nHeight( 1 ) , m_arrNext( nullptr ) + , m_nUnlink( 0 ) {} @@ -163,6 +165,11 @@ namespace cds { namespace intrusive { && m_arrNext == nullptr && m_nHeight <= 1; } + + bool level_unlinked( unsigned nCount = 1 ) + { + return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height(); + } //@endcond }; @@ -395,10 +402,13 @@ namespace cds { namespace intrusive { event_counter m_nFindSlowSuccess ; ///< Count of successful call of \p find and all derivatives (via slow-path) event_counter m_nFindSlowFailed ; ///< Count of failed call of \p find and all derivatives (via slow-path) event_counter m_nRenewInsertPosition ; ///< Count of renewing position events while inserting - event_counter m_nLogicDeleteWhileInsert ; ///< Count of events "The node has been logically deleted while inserting" + event_counter m_nLogicDeleteWhileInsert; ///< Count of events "The node has been logically deleted while inserting" + event_counter m_nEraseWhileInsert ; ///< Count of events "The node has been disposed while inserting" event_counter m_nNotFoundWhileInsert ; ///< Count of events "Inserting node is not found" event_counter m_nFastErase ; ///< Fast erase event counter + event_counter m_nFastEraseHelped ; ///< Fast erase with helping of other thread event_counter m_nFastExtract ; ///< Fast extract event counter + event_counter m_nFastExtractHelped ; ///< Fast extract with helping of other thread event_counter m_nSlowErase ; ///< Slow erase event counter event_counter m_nSlowExtract ; ///< Slow extract event counter event_counter m_nExtractSuccess ; ///< Count of successful call of \p extract @@ -412,6 +422,7 @@ namespace cds { namespace intrusive { event_counter m_nExtractMaxRetries ; ///< Count of retries of \p extract_max call event_counter m_nEraseWhileFind ; ///< Count of erased item while searching event_counter m_nExtractWhileFind ; ///< Count of extracted item while searching (RCU only) + event_counter m_nMarkFailed ; ///< Count of failed node marking (logical deletion mark) //@cond void onAddNode( unsigned int nHeight ) @@ -443,9 +454,12 @@ namespace cds { namespace intrusive { void onExtractWhileFind() { ++m_nExtractWhileFind ; } void onRenewInsertPosition() { ++m_nRenewInsertPosition; } void onLogicDeleteWhileInsert() { ++m_nLogicDeleteWhileInsert; } + void onEraseWhileInsert() { ++m_nEraseWhileInsert; } void 
onNotFoundWhileInsert() { ++m_nNotFoundWhileInsert; } void onFastErase() { ++m_nFastErase; } + void onFastEraseHelped() { ++m_nFastEraseHelped; } void onFastExtract() { ++m_nFastExtract; } + void onFastExtractHelped() { ++m_nFastExtractHelped; } void onSlowErase() { ++m_nSlowErase; } void onSlowExtract() { ++m_nSlowExtract; } void onExtractSuccess() { ++m_nExtractSuccess; } @@ -457,6 +471,7 @@ namespace cds { namespace intrusive { void onExtractMaxSuccess() { ++m_nExtractMaxSuccess; } void onExtractMaxFailed() { ++m_nExtractMaxFailed; } void onExtractMaxRetry() { ++m_nExtractMaxRetries; } + void onMarkFailed() { ++m_nMarkFailed; } //@endcond }; @@ -483,9 +498,12 @@ namespace cds { namespace intrusive { void onExtractWhileFind() const {} void onRenewInsertPosition() const {} void onLogicDeleteWhileInsert() const {} + void onEraseWhileInsert() const {} void onNotFoundWhileInsert() const {} void onFastErase() const {} + void onFastEraseHelped() const {} void onFastExtract() const {} + void onFastExtractHelped() const {} void onSlowErase() const {} void onSlowExtract() const {} void onExtractSuccess() const {} @@ -497,6 +515,7 @@ namespace cds { namespace intrusive { void onExtractMaxSuccess() const {} void onExtractMaxFailed() const {} void onExtractMaxRetry() const {} + void onMarkFailed() const {} //@endcond }; diff --git a/cds/intrusive/impl/skip_list.h b/cds/intrusive/impl/skip_list.h index 80c43ee5..fb29f20a 100644 --- a/cds/intrusive/impl/skip_list.h +++ b/cds/intrusive/impl/skip_list.h @@ -1153,7 +1153,7 @@ namespace cds { namespace intrusive { if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) { - if ( nLevel == 0 ) { + if ( pCur->level_unlinked() ) { gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); m_Stat.onEraseWhileFind(); } @@ -1197,7 +1197,7 @@ namespace cds { namespace intrusive { if ( pSucc.bits() ) { // pCur is marked, i.e. logically deleted - // try to help deleting pCur if pSucc is not being deleted + // try to help deleting pCur help_remove( nLevel, pPred, pCur, pSucc ); goto retry; } @@ -1258,7 +1258,7 @@ namespace cds { namespace intrusive { if ( pSucc.bits() ) { // pCur is marked, i.e. logically deleted. - // try to help deleting pCur if pSucc is not being deleted + // try to help deleting pCur help_remove( nLevel, pPred, pCur, pSucc ); goto retry; } @@ -1307,7 +1307,7 @@ namespace cds { namespace intrusive { if ( pSucc.bits() ) { // pCur is marked, i.e. logically deleted. 
- // try to help deleting pCur if pSucc is not being deleted + // try to help deleting pCur help_remove( nLevel, pPred, pCur, pSucc ); goto retry; } @@ -1331,17 +1331,15 @@ namespace cds { namespace intrusive { template bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) { - unsigned int nHeight = pNode->height(); + unsigned int const nHeight = pNode->height(); for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) pNode->next( nLevel ).store( marked_node_ptr(), memory_model::memory_order_relaxed ); // Insert at level 0 { - node_type* succ = pos.pSucc[0]; - - marked_node_ptr p( succ ); - pNode->next( 0 ).store( p, memory_model::memory_order_release ); + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_relaxed ); if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) return false; @@ -1352,25 +1350,38 @@ namespace cds { namespace intrusive { for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { marked_node_ptr p; while ( true ) { - marked_node_ptr q( pos.pSucc[nLevel] ); + marked_node_ptr pSucc( pos.pSucc[nLevel] ); - if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, atomics::memory_order_relaxed )) { + // Set pNode->next + // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now + if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { // pNode has been marked as removed while we are inserting it // Stop inserting - assert( p.bits() ); - m_Stat.onLogicDeleteWhileInsert(); + assert( p.bits() != 0 ); + + if ( pNode->level_unlinked( nHeight - nLevel )) { + gc::retire( node_traits::to_value_ptr( pNode ), dispose_node ); + m_Stat.onEraseWhileInsert(); + } + else + m_Stat.onLogicDeleteWhileInsert(); return true; } + p = pSucc; - p = q; - bool const result = pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( q, marked_node_ptr( pNode ), - memory_model::memory_order_release, atomics::memory_order_relaxed ); - if ( result ) + // Link pNode into the list at nLevel + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // go to next level break; + } // Renew insert position m_Stat.onRenewInsertPosition(); - if ( !find_position( val, pos, key_comparator(), false ) ) { + if ( !find_position( val, pos, key_comparator(), false )) { // The node has been deleted while we are inserting it m_Stat.onNotFoundWhileInsert(); return true; @@ -1386,38 +1397,55 @@ namespace cds { namespace intrusive { assert( pDel != nullptr ); marked_node_ptr pSucc; + back_off bkoff; // logical deletion (marking) for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { - while ( true ) { - pSucc = pDel->next( nLevel ); - if ( pSucc.bits() || pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1, - memory_model::memory_order_release, atomics::memory_order_relaxed ) ) + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pSucc.bits() == 0 ) { + bkoff.reset(); + while ( !( pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1, + memory_model::memory_order_release, atomics::memory_order_acquire ) + || pSucc.bits() != 0 )) { - break; + bkoff(); + m_Stat.onMarkFailed(); } } } + 
marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr()); while ( true ) { - marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() ); - if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) + if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_acquire )) { f( *node_traits::to_value_ptr( pDel ) ); // Physical deletion // try fast erase p = pDel; + unsigned nCount = 0; + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); - if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) + if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + // Maybe, another threads already helped us to delete the node?.. + if ( nCount ) { + if ( pDel->level_unlinked( nCount )) { + gc::retire( node_traits::to_value_ptr( pDel ), dispose_node ); + m_Stat.onFastEraseHelped(); + return true; + } + } + // Make slow erase find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); m_Stat.onSlowErase(); return true; } + ++nCount; } // Fast erasing success @@ -1425,13 +1453,12 @@ namespace cds { namespace intrusive { m_Stat.onFastErase(); return true; } - else { - if ( p.bits() ) { - // Another thread is deleting pDel right now - return false; - } + else if ( p.bits() ) { + // Another thread is deleting pDel right now + return false; } m_Stat.onEraseRetry(); + bkoff(); } } @@ -1444,12 +1471,17 @@ namespace cds { namespace intrusive { finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) { node_type * pPred; - typename gc::template GuardArray<2> guards; marked_node_ptr pCur; marked_node_ptr pNull; + // guard array: + // 0 - pPred on level N + // 1 - pCur on level N + typename gc::template GuardArray<2> guards; back_off bkoff; + unsigned attempt = 0; + try_again: pPred = m_Head.head(); for ( int nLevel = static_cast( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect ); @@ -1458,22 +1490,17 @@ namespace cds { namespace intrusive { while ( pCur != pNull ) { if ( pCur.bits() ) { - unsigned int nAttempt = 0; - bkoff.reset(); - while ( pCur.bits() && nAttempt++ < 16 ) { + // pPred is being removed + if ( ++attempt < 4 ) { bkoff(); - pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect ); + goto try_again; } - if ( pCur.bits() ) { - // Maybe, we are on deleted node sequence - // Abort searching, try slow-path - return find_fastpath_abort; - } + return find_fastpath_abort; } if ( pCur.ptr() ) { - int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); if ( nCmp < 0 ) { guards.copy( 0, 1 ); pPred = pCur.ptr(); @@ -1484,8 +1511,10 @@ namespace cds { namespace intrusive { f( *node_traits::to_value_ptr( pCur.ptr() ), val ); return find_fastpath_found; } - else // pCur > val - go down + else { + // pCur > val - go down break; + } } } } diff --git a/cds/intrusive/skip_list_rcu.h b/cds/intrusive/skip_list_rcu.h index c0722289..79d9f5b2 100644 --- a/cds/intrusive/skip_list_rcu.h +++ 
b/cds/intrusive/skip_list_rcu.h @@ -64,34 +64,21 @@ namespace cds { namespace intrusive { atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) public: node * m_pDelChain; ///< Deleted node chain (local for a thread) -# ifdef _DEBUG - bool volatile m_bLinked; - bool volatile m_bUnlinked; -# endif protected: unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr + atomics::atomic m_nUnlink; ///< How many levels has been unlinked public: /// Constructs a node of height 1 (a bottom-list node) CDS_CONSTEXPR node() : m_pNext( nullptr ) , m_pDelChain( nullptr ) -# ifdef _DEBUG - , m_bLinked( false ) - , m_bUnlinked( false ) -# endif , m_nHeight(1) , m_arrNext( nullptr ) + , m_nUnlink(0) {} -# ifdef _DEBUG - ~node() - { - assert( !m_bLinked || m_bUnlinked ); - } -# endif - /// Constructs a node of height \p nHeight void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { @@ -189,6 +176,11 @@ namespace cds { namespace intrusive { && m_arrNext == nullptr && m_nHeight <= 1; } + + bool level_unlinked( unsigned nCount = 1 ) + { + return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height(); + } }; } // namespace skip_list //@endcond @@ -689,451 +681,284 @@ namespace cds { namespace intrusive { /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr; - protected: - //@cond - - bool is_extracted( marked_node_ptr const p ) const + public: + /// Default constructor + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + , m_pDeferredDelChain( nullptr ) { - return (p.bits() & 2) != 0; + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + // Barrier for head node + atomics::atomic_thread_fence( memory_model::memory_order_release ); } - template - bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + /// Clears and destructs the skip-list + ~SkipListSet() { - assert( gc::is_locked()); - - node_type * pPred; - marked_node_ptr pSucc; - marked_node_ptr pCur; - int nCmp = 1; - - retry: - pPred = m_Head.head(); - - for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { - - while ( true ) { - pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); - if ( pCur.bits()) { - // pCur.bits() means that pPred is logically deleted - goto retry; - } - - if ( pCur.ptr() == nullptr ) { - // end of the list at level nLevel - goto next level - break; - } - - // pSucc contains deletion mark for pCur - pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + clear(); + } - if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) - goto retry; + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList - if ( pSucc.bits()) { - // pCur is marked, i.e. logically deleted. 
- marked_node_ptr p( pCur.ptr()); -# ifdef _DEBUG - if ( nLevel == 0 ) - pCur->m_bUnlinked = true; -# endif - if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - if ( nLevel == 0 ) { - if ( !is_extracted( pSucc )) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pCur.ptr()); - m_Stat.onEraseWhileFind(); - } - else { - m_Stat.onExtractWhileFind(); - } - } - } - goto retry; - } - else { - nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); - if ( nCmp < 0 ) - pPred = pCur.ptr(); - else if ( nCmp == 0 && bStopIfFound ) - goto found; - else - break; - } - } + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. + */ + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; - // Next level - pos.pPrev[ nLevel ] = pPred; - pos.pSucc[ nLevel ] = pCur.ptr(); - } + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; - if ( nCmp != 0 ) - return false; + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head()); + } - found: - pos.pCur = pCur.ptr(); - return pCur.ptr() && nCmp == 0; + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( *m_Head.head()); } - bool find_min_position( position& pos ) + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const { - assert( gc::is_locked()); + return const_iterator( *m_Head.head()); + } - node_type * pPred; - marked_node_ptr pSucc; - marked_node_ptr pCur; + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } - retry: - pPred = m_Head.head(); + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator(); + } - for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator(); + } + //@} - pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); - // pCur.bits() means that pPred is logically deleted - // head cannot be deleted - assert( pCur.bits() == 0 ); + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. - if ( pCur.ptr()) { + The function applies RCU lock internally. - // pSucc contains deletion mark for pCur - pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } - if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) - goto retry; + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. - if ( pSucc.bits()) { - // pCur is marked, i.e. logically deleted. 
-# ifdef _DEBUG - if ( nLevel == 0 ) - pCur->m_bUnlinked = true; -# endif - marked_node_ptr p( pCur.ptr()); - if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - if ( nLevel == 0 ) { - if ( !is_extracted( pSucc )) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pCur.ptr()); - m_Stat.onEraseWhileFind(); - } - else { - m_Stat.onExtractWhileFind(); - } - } - } - goto retry; - } - } + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. - // Next level - pos.pPrev[ nLevel ] = pPred; - pos.pSucc[ nLevel ] = pCur.ptr(); - } - return (pos.pCur = pCur.ptr()) != nullptr; - } + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. - bool find_max_position( position& pos ) + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( value_type& val, Func f ) { - assert( gc::is_locked()); + check_deadlock_policy::check(); - node_type * pPred; - marked_node_ptr pSucc; - marked_node_ptr pCur; + position pos; + bool bRet; - retry: - pPred = m_Head.head(); + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; - for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + rcu_lock rcuLock; - while ( true ) { - pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); - if ( pCur.bits()) { - // pCur.bits() means that pPred is logically deleted - goto retry; - } + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); - if ( pCur.ptr() == nullptr ) { - // end of the list at level nLevel - goto next level + m_Stat.onInsertFailed(); + bRet = false; break; } - // pSucc contains deletion mark for pCur - pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); - - if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) - goto retry; - - if ( pSucc.bits()) { - // pCur is marked, i.e. logically deleted. 
-# ifdef _DEBUG - if ( nLevel == 0 ) - pCur->m_bUnlinked = true; -# endif - marked_node_ptr p( pCur.ptr()); - if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - if ( nLevel == 0 ) { - if ( !is_extracted( pSucc )) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pCur.ptr()); - m_Stat.onEraseWhileFind(); - } - else { - m_Stat.onExtractWhileFind(); - } - } - } - goto retry; + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; } - else { - if ( !pSucc.ptr()) - break; - pPred = pCur.ptr(); + if ( !insert_at_position( val, pNode, pos, f )) { + m_Stat.onInsertRetry(); + continue; } - } - // Next level - pos.pPrev[ nLevel ] = pPred; - pos.pSucc[ nLevel ] = pCur.ptr(); + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + bRet = true; + break; + } } - return (pos.pCur = pCur.ptr()) != nullptr; + return bRet; } - template - bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) - { - assert( gc::is_locked()); - - unsigned int nHeight = pNode->height(); - pNode->clear_tower(); + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. - { - marked_node_ptr p( pos.pSucc[0] ); - pNode->next( 0 ).store( p, memory_model::memory_order_release ); - if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) { - return false; - } -# ifdef _DEBUG - pNode->m_bLinked = true; -# endif - f( val ); - } + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. - for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { - marked_node_ptr p; - while ( true ) { - marked_node_ptr q( pos.pSucc[ nLevel ]); - if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { - // pNode has been marked as removed while we are inserting it - // Stop inserting - assert( p.bits()); - m_Stat.onLogicDeleteWhileInsert(); - return true; - } - p = q; - if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) - break; + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. - // Renew insert position - m_Stat.onRenewInsertPosition(); - if ( !find_position( val, pos, key_comparator(), false )) { - // The node has been deleted while we are inserting it - m_Stat.onNotFoundWhileInsert(); - return true; - } - } - } - return true; - } + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ template - bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) + std::pair update( value_type& val, Func func, bool bInsert = true ) { - assert( pDel != nullptr ); - assert( gc::is_locked()); - - marked_node_ptr pSucc; + check_deadlock_policy::check(); - // logical deletion (marking) - for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { - pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed ); - while ( true ) { - if ( pSucc.bits() - || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) - { - break; - } - } - } + position pos; + std::pair bRet( true, false ); - pSucc = pDel->next(0).load( memory_model::memory_order_relaxed ); - while ( true ) { - if ( pSucc.bits()) - return false; + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; - int const nMask = bExtract ? 3 : 1; - if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + rcu_lock rcuLock; + while ( true ) { - f( *node_traits::to_value_ptr( pDel )); + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); - // physical deletion - // try fast erase - pSucc = pDel; - for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { - if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc, - marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr()), - memory_model::memory_order_release, atomics::memory_order_relaxed)) - { - // Do slow erase - find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false ); - if ( bExtract ) - m_Stat.onSlowExtract(); - else - m_Stat.onSlowErase(); -# ifdef _DEBUG - assert( pDel->m_bUnlinked ); -# endif - return true; - } + func( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onUpdateExist(); + break; } -# ifdef _DEBUG - pDel->m_bUnlinked = true; -# endif - if ( !bExtract ) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pDel ); - m_Stat.onFastErase(); + if ( !bInsert ) { + scp.release(); + bRet.first = false; + break; } - else - m_Stat.onFastExtract(); - return true; - } - m_Stat.onEraseRetry(); - } - } - - enum finsd_fastpath_result { - find_fastpath_found, - find_fastpath_not_found, - find_fastpath_abort - }; - template - finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const - { - node_type * pPred; - marked_node_ptr pCur; - marked_node_ptr pSucc; - marked_node_ptr pNull; - - back_off bkoff; - - pPred = m_Head.head(); - for ( int nLevel = static_cast(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) { - pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire ); - if ( pCur == pNull ) - continue; - - while ( pCur != pNull ) { - if ( pCur.bits()) { - // Wait until 
pCur is removed - unsigned int nAttempt = 0; - while ( pCur.bits() && nAttempt++ < 16 ) { - bkoff(); - pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire ); - } - bkoff.reset(); - if ( pCur.bits()) { - // Maybe, we are on deleted node sequence - // Abort searching, try slow-path - return find_fastpath_abort; - } + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; } - if ( pCur.ptr()) { - int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); - if ( nCmp < 0 ) { - pPred = pCur.ptr(); - pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire ); - } - else if ( nCmp == 0 ) { - // found - f( *node_traits::to_value_ptr( pCur.ptr()), val ); - return find_fastpath_found; - } - else // pCur > val - go down - break; + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { + m_Stat.onInsertRetry(); + continue; } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onUpdateNew(); + bRet.second = true; + break; } } - return find_fastpath_not_found; - } - - template - bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) - { - if ( find_position( val, pos, cmp, true )) { - assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); - - f( *node_traits::to_value_ptr( pos.pCur ), val ); - return true; - } - else - return false; + return bRet; } - - template - bool do_find_with( Q& val, Compare cmp, Func f ) + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) { - position pos; - return do_find_with( val, cmp, f, pos ); + return update( val, func, true ); } + //@endcond - template - bool do_find_with( Q& val, Compare cmp, Func f, position& pos ) - { - bool bRet; + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. - { - rcu_lock l; + Difference between \p erase() and \p %unlink() functions: \p erase() finds a key + and deletes the item found. \p %unlink() searches an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . - switch ( find_fastpath( val, cmp, f )) { - case find_fastpath_found: - m_Stat.onFindFastSuccess(); - return true; - case find_fastpath_not_found: - m_Stat.onFindFastFailed(); - return false; - default: - break; - } + RCU \p synchronize method can be called. RCU should not be locked. - if ( find_slowpath( val, cmp, f, pos )) { - m_Stat.onFindSlowSuccess(); - bRet = true; - } - else { - m_Stat.onFindSlowFailed(); - bRet = false; - } - } - return bRet; - } + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. - template - bool do_erase( Q const& val, Compare cmp, Func f ) + The function returns \p true if success and \p false otherwise. 
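Note on the mark bits used throughout the hunks above: the low bits of the level-0 "next" pointer carry the node state. In the RCU flavour bit 0 means "logically deleted" and bit 1 means "extracted", which is why try_remove_at() CASes pSucc | nMask with nMask == 3 for extract and 1 for erase, and why is_extracted() tests ( p.bits() & 2 ). A minimal sketch of such a tagged pointer, the role played by marked_node_ptr in the library (alignment of the node type must leave the two low bits free):
\code
#include <cstdint>

template <typename T>
class tagged_ptr {
    uintptr_t m_val;
public:
    static constexpr uintptr_t mask = 3;

    tagged_ptr() : m_val( 0 ) {}
    tagged_ptr( T* p, uintptr_t bits = 0 )
        : m_val( reinterpret_cast<uintptr_t>( p ) | ( bits & mask ))
    {}

    T*        ptr()  const { return reinterpret_cast<T*>( m_val & ~mask ); }
    uintptr_t bits() const { return m_val & mask; }

    // "p | 1" / "p | 3" - marking a node for erase / extract
    friend tagged_ptr operator|( tagged_ptr p, uintptr_t bits )
    {
        return tagged_ptr( p.ptr(), p.bits() | bits );
    }
};
\endcode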
+ */ + bool unlink( value_type& val ) { check_deadlock_policy::check(); @@ -1141,25 +966,26 @@ namespace cds { namespace intrusive { bool bRet; { - rcu_lock rcuLock; + rcu_lock l; - if ( !find_position( val, pos, cmp, false )) { - m_Stat.onEraseFailed(); + if ( !find_position( val, pos, key_comparator(), false )) { + m_Stat.onUnlinkFailed(); bRet = false; } else { node_type * pDel = pos.pCur; - assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); - if ( try_remove_at( pDel, pos, f, false )) { + + if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); - m_Stat.onEraseSuccess(); + m_Stat.onUnlinkSuccess(); bRet = true; } else { - m_Stat.onEraseFailed(); + m_Stat.onUnlinkFailed(); bRet = false; } } @@ -1168,851 +994,1015 @@ namespace cds { namespace intrusive { return bRet; } - template - value_type * do_extract_key( Q const& key, Compare cmp, position& pos ) - { - // RCU should be locked!!! - assert( gc::is_locked()); - - node_type * pDel; - - if ( !find_position( key, pos, cmp, false )) { - m_Stat.onExtractFailed(); - pDel = nullptr; - } - else { - pDel = pos.pCur; - assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); + /// Extracts the item from the set with specified \p key + /** \anchor cds_intrusive_SkipListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. - unsigned int const nHeight = pDel->height(); + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. - if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onExtractSuccess(); - } - else { - m_Stat.onExtractFailed(); - pDel = nullptr; - } - } + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... - return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; - } + typename skip_list::exempt_ptr ep( theList.extract( 5 )); + if ( ep ) { + // Deal with ep + //... + // Dispose returned item. + ep.release(); + } + \endcode + */ template - value_type * do_extract( Q const& key ) + exempt_ptr extract( Q const& key ) { - check_deadlock_policy::check(); - value_type * pDel = nullptr; - position pos; - { - rcu_lock l; - pDel = do_extract_key( key, key_comparator(), pos ); - } - - return pDel; + return exempt_ptr( do_extract( key )); } + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
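A hypothetical call site for extract_with(), continuing the "foo" / "theList" placeholders used by the documentation examples above; the foo_less predicate and the key field are assumptions of this sketch, not part of the library:
\code
struct foo_less {
    bool operator()( foo const& lhs, foo const& rhs ) const { return lhs.key < rhs.key; }
    bool operator()( foo const& lhs, int rhs )        const { return lhs.key < rhs; }
    bool operator()( int lhs, foo const& rhs )        const { return lhs < rhs.key; }
};

skip_list::exempt_ptr ep = theList.extract_with( 5, foo_less());
if ( ep ) {
    // deal with *ep ...
    ep.release();   // disposal happens here or when ep is destroyed, as documented above
}
\endcode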
+ */ template - value_type * do_extract_with( Q const& key, Less pred ) + exempt_ptr extract_with( Q const& key, Less pred ) { - CDS_UNUSED(pred); - check_deadlock_policy::check(); - value_type * pDel = nullptr; - position pos; - { - rcu_lock l; - pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less(), pos ); - } - - return pDel; + return exempt_ptr( do_extract_with( key, pred )); } - value_type * do_extract_min() - { - assert( !gc::is_locked()); - - position pos; - node_type * pDel; + /// Extracts an item with minimal key from the list + /** + The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. - { - rcu_lock l; + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is manually called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... - if ( !find_min_position( pos )) { - m_Stat.onExtractMinFailed(); - pDel = nullptr; - } - else { - pDel = pos.pCur; - unsigned int const nHeight = pDel->height(); + typename skip_list::exempt_ptr ep(theList.extract_min()); + if ( ep ) { + // Deal with ep + //... - if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onExtractMinSuccess(); - } - else { - m_Stat.onExtractMinFailed(); - pDel = nullptr; - } - } + // Dispose returned item. + ep.release(); } + \endcode - return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; - } - - value_type * do_extract_max() + @note Due the concurrent nature of the list, the function extracts nearly minimum key. + It means that the function gets leftmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of list traversing. + */ + exempt_ptr extract_min() { - assert( !gc::is_locked()); - - position pos; - node_type * pDel; + return exempt_ptr( do_extract_min()); + } - { - rcu_lock l; + /// Extracts an item with maximal key from the list + /** + The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. - if ( !find_max_position( pos )) { - m_Stat.onExtractMaxFailed(); - pDel = nullptr; - } - else { - pDel = pos.pCur; - unsigned int const nHeight = pDel->height(); + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is manually called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... 
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onExtractMaxSuccess(); - } - else { - m_Stat.onExtractMaxFailed(); - pDel = nullptr; - } - } + typename skip_list::exempt_ptr ep( theList.extract_max()); + if ( ep ) { + // Deal with ep + //... + // Dispose returned item. + ep.release(); } + \endcode - return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; - } - - void increase_height( unsigned int nHeight ) + @note Due the concurrent nature of the list, the function extracts nearly maximal key. + It means that the function gets rightmost item and tries to unlink it. + During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of list traversing. + */ + exempt_ptr extract_max() { - unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); - if ( nCur < nHeight ) - m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); + return exempt_ptr( do_extract_max()); } - //@endcond - public: - /// Default constructor - SkipListSet() - : m_Head( c_nMaxHeight ) - , m_nHeight( c_nMinHeight ) - , m_pDeferredDelChain( nullptr ) - { - static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. - // Barrier for head node - atomics::atomic_thread_fence( memory_model::memory_order_release ); - } + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. - /// Clears and destructs the skip-list - ~SkipListSet() + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( const Q& key ) { - clear(); + return do_erase( key, key_comparator(), [](value_type const&) {} ); } - public: - ///@name Forward iterators (thread-safe under RCU lock) - //@{ - /// Forward iterator + /// Delete the item from the set with comparing functor \p pred /** - The forward iterator has some features: - - it has no post-increment operator - - it depends on iterator of underlying \p OrderedList - - You may safely use iterators in multi-threaded environment only under RCU lock. - Otherwise, a crash is possible if another thread deletes the element the iterator points to. + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
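A hypothetical call site for erase( key, f ), again reusing the "foo" / "theList" placeholders of the documentation examples; last_erased_key and the key field are illustrative:
\code
int last_erased_key = -1;
bool removed = theList.erase( 5, [&last_erased_key]( foo const& item ) {
    // the item is still alive here; the disposer runs later, asynchronously
    last_erased_key = item.key;
});
\endcode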
*/ - typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; - - /// Const iterator type - typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; - - /// Returns a forward iterator addressing the first element in a set - iterator begin() + template + bool erase_with( const Q& key, Less pred ) { - return iterator( *m_Head.head()); + CDS_UNUSED( pred ); + return do_erase( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); } - /// Returns a forward const iterator addressing the first element in a set - const_iterator begin() const - { - return const_iterator( *m_Head.head()); - } + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. - /// Returns a forward const iterator addressing the first element in a set - const_iterator cbegin() const - { - return const_iterator( *m_Head.head()); - } + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + If the item with key equal to \p key is not found the function return \p false. - /// Returns a forward iterator that addresses the location succeeding the last element in a set. - iterator end() - { - return iterator(); - } + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. - /// Returns a forward const iterator that addresses the location succeeding the last element in a set. - const_iterator end() const + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( Q const& key, Func f ) { - return const_iterator(); + return do_erase( key, key_comparator(), f ); } - /// Returns a forward const iterator that addresses the location succeeding the last element in a set. - const_iterator cend() const - { - return const_iterator(); + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_erase( key, cds::opt::details::make_comparator_from_less(), f ); } - //@} - public: - /// Inserts new node - /** - The function inserts \p val in the set if it does not contain - an item with key equal to \p val. + /// Finds \p key + /** @anchor cds_intrusive_SkipListSet_rcu_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. 
If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. The function applies RCU lock internally. - Returns \p true if \p val is placed into the set, \p false otherwise. + The function returns \p true if \p key is found, \p false otherwise. */ - bool insert( value_type& val ) + template + bool find( Q& key, Func f ) { - return insert( val, []( value_type& ) {} ); + return do_find_with( key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return do_find_with( key, key_comparator(), f ); } + //@endcond - /// Inserts new node + /// Finds the key \p key with comparing functor \p pred /** - This function is intended for derived non-intrusive containers. - - The function allows to split creating of new item into two part: - - create item with key only - - insert new item into the set - - if inserting is success, calls \p f functor to initialize value-field of \p val. - - The functor signature is: - \code - void func( value_type& val ); - \endcode - where \p val is the item inserted. User-defined functor \p f should guarantee that during changing - \p val no any other changes could be made on this set's item by concurrent threads. - The user-defined functor is called only if the inserting is success. - - RCU \p synchronize method can be called. RCU should not be locked. + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" + but \p cmp is used for key comparison. + \p Less functor has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. */ - template - bool insert( value_type& val, Func f ) + template + bool find_with( Q& key, Less pred, Func f ) { - check_deadlock_policy::check(); - - position pos; - bool bRet; + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond - { - node_type * pNode = node_traits::to_node_ptr( val ); - scoped_node_ptr scp( pNode ); - unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; - bool bTowerMade = false; + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. - rcu_lock rcuLock; + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) + { + return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond - while ( true ) - { - bool bFound = find_position( val, pos, key_comparator(), true ); - if ( bFound ) { - // scoped_node_ptr deletes the node tower if we create it - if ( !bTowerMade ) - scp.release(); + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
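A hypothetical call site for find() and contains(), reusing the same "foo" / "theList" placeholders; the payload field is an assumption of this sketch:
\code
int key = 5;
bool found = theList.find( key, []( foo& item, int /*k*/ ) {
    // the item cannot be disposed while the functor runs, but concurrent
    // writers are not serialized - protect payload yourself if needed
    ++item.payload;
});
bool present = theList.contains( 10 );
\endcode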
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond - m_Stat.onInsertFailed(); - bRet = false; - break; - } + /// Finds \p key and return the item found + /** \anchor cds_intrusive_SkipListSet_rcu_get + The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. + If \p key is not found it returns empty \p raw_ptr. - if ( !bTowerOk ) { - build_node( pNode ); - nHeight = pNode->height(); - bTowerMade = - bTowerOk = true; - } + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. - if ( !insert_at_position( val, pNode, pos, f )) { - m_Stat.onInsertRetry(); - continue; - } + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + typename skip_list::raw_ptr pVal; + { + // Lock RCU + skip_list::rcu_lock lock; - increase_height( nHeight ); - ++m_ItemCounter; - m_Stat.onAddNode( nHeight ); - m_Stat.onInsertSuccess(); - scp.release(); - bRet = true; - break; + pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... } } + // You can manually release pVal after RCU-locked section + pVal.release(); + \endcode + */ + template + raw_ptr get( Q const& key ) + { + assert( gc::is_locked()); - return bRet; + position pos; + value_type * pFound; + if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) + return raw_ptr( pFound, raw_ptr_disposer( pos )); + return raw_ptr( raw_ptr_disposer( pos )); } - /// Updates the node + /// Finds \p key and return the item found /** - The operation performs inserting or changing data with lock-free manner. - - If the item \p val is not found in the set, then \p val is inserted into the set - iff \p bInsert is \p true. - Otherwise, the functor \p func is called with item found. - The functor signature is: - \code - void func( bool bNew, value_type& item, value_type& val ); - \endcode - with arguments: - - \p bNew - \p true if the item has been inserted, \p false otherwise - - \p item - item of the set - - \p val - argument \p val passed into the \p %update() function - If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments - refer to the same thing. - - The functor can change non-key fields of the \p item; however, \p func must guarantee - that during changing no any other modifications could be made on this item by concurrent threads. - - RCU \p synchronize method can be called. RCU should not be locked. - - Returns std::pair where \p first is \p true if operation is successful, - i.e. the node has been inserted or updated, - \p second is \p true if new item has been added or \p false if the item with \p key - already exists. + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. - @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. 
+ \p pred must imply the same element order as the comparator used for building the set. */ - template - std::pair update( value_type& val, Func func, bool bInsert = true ) + template + raw_ptr get_with( Q const& key, Less pred ) { - check_deadlock_policy::check(); + CDS_UNUSED( pred ); + assert( gc::is_locked()); + value_type * pFound = nullptr; position pos; - std::pair bRet( true, false ); - + if ( do_find_with( key, cds::opt::details::make_comparator_from_less(), + [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) { - node_type * pNode = node_traits::to_node_ptr( val ); - scoped_node_ptr scp( pNode ); - unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; - bool bTowerMade = false; + return raw_ptr( pFound, raw_ptr_disposer( pos )); + } + return raw_ptr( raw_ptr_disposer( pos )); + } - rcu_lock rcuLock; - while ( true ) - { - bool bFound = find_position( val, pos, key_comparator(), true ); - if ( bFound ) { - // scoped_node_ptr deletes the node tower if we create it before - if ( !bTowerMade ) - scp.release(); - - func( false, *node_traits::to_value_ptr(pos.pCur), val ); - m_Stat.onUpdateExist(); - break; - } + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + For \p atomicity::empty_item_counter the function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } - if ( !bInsert ) { - scp.release(); - bRet.first = false; - break; - } + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; + } - if ( !bTowerOk ) { - build_node( pNode ); - nHeight = pNode->height(); - bTowerMade = - bTowerOk = true; - } + /// Clears the set (not atomic) + /** + The function unlink all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. - if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { - m_Stat.onInsertRetry(); - continue; - } + For each item the \p disposer will be called automatically after unlinking. + */ + void clear() + { + exempt_ptr ep; + while ( (ep = extract_min())); + } - increase_height( nHeight ); - ++m_ItemCounter; - scp.release(); - m_Stat.onAddNode( nHeight ); - m_Stat.onUpdateNew(); - bRet.second = true; - break; - } - } + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return c_nMaxHeight; + } - return bRet; + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; } + + protected: //@cond - template - CDS_DEPRECATED("ensure() is deprecated, use update()") - std::pair ensure( value_type& val, Func func ) + + bool is_extracted( marked_node_ptr const p ) const { - return update( val, func, true ); + return ( p.bits() & 2 ) != 0; } - //@endcond - /// Unlinks the item \p val from the set - /** - The function searches the item \p val in the set and unlink it from the set - if it is found and is equal to \p val. 
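The protected implementation section opened just above relies on a deferred-deletion chain: the help_remove() helper added right below links unlinked nodes through node_type::m_pDelChain (via pos.dispose()) because the disposer must not run while the RCU read-side lock is held, and raw_ptr_disposer carries the same chain out of get(). A simplified standalone sketch of that idiom, with Node and Disposer as illustrative template parameters:
\code
template <class Node, class Disposer>
class deferred_chain {
    Node*    m_pHead = nullptr;
    Disposer m_disposer;
public:
    // called under rcu_lock: only remember the node
    void defer( Node* pNode )
    {
        pNode->m_pDelChain = m_pHead;
        m_pHead = pNode;
    }

    // called once disposal is allowed again
    void dispose_all()
    {
        Node* p = m_pHead;
        m_pHead = nullptr;
        while ( p ) {
            Node* pNext = p->m_pDelChain;
            m_disposer( p );
            p = pNext;
        }
    }
};
\endcode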
+ void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos ) + { + marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, atomics::memory_order_relaxed ) ) + { + if ( pCur->level_unlinked()) { + if ( !is_extracted( pSucc ) ) { + // We cannot free the node at this moment because RCU is locked + // Link deleted nodes to a chain to free later + pos.dispose( pCur.ptr() ); + m_Stat.onEraseWhileFind(); + } + else + m_Stat.onExtractWhileFind(); + } + } + } - Difference between \p erase() and \p %unlink() functions: \p erase() finds a key - and deletes the item found. \p %unlink() searches an item by key and deletes it - only if \p val is an item of that set, i.e. the pointer to item found - is equal to &val . + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + { + assert( gc::is_locked() ); - RCU \p synchronize method can be called. RCU should not be locked. + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + int nCmp = 1; - The \ref disposer specified in \p Traits class template parameter is called - by garbage collector \p GC asynchronously. + retry: + pPred = m_Head.head(); - The function returns \p true if success and \p false otherwise. - */ - bool unlink( value_type& val ) - { - check_deadlock_policy::check(); + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { - position pos; - bool bRet; + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } - { - rcu_lock l; + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } - if ( !find_position( val, pos, key_comparator(), false )) { - m_Stat.onUnlinkFailed(); - bRet = false; - } - else { - node_type * pDel = pos.pCur; - assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); - unsigned int nHeight = pDel->height(); + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) + goto retry; - if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onUnlinkSuccess(); - bRet = true; + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; } else { - m_Stat.onUnlinkFailed(); - bRet = false; + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp < 0 ) + pPred = pCur.ptr(); + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; } } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); } - return bRet; + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur.ptr(); + return pCur.ptr() && nCmp == 0; } - /// Extracts the item from the set with specified \p key - /** \anchor cds_intrusive_SkipListSet_rcu_extract - The function searches an item with key equal to \p key in the set, - unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. - If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. 
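The help_remove() above is the heart of this fix: a node of height H is spliced out of H lists, possibly by several helping threads, and m_nUnlink counts how many levels have already been accounted for. Exactly one caller of level_unlinked() sees the tower completed, and only that caller retires the node (gc::retire() in skip_list.h) or links it to the deferred chain via pos.dispose() here under RCU; the fast-erase and "deleted while inserting" paths credit several levels in one call. A minimal standalone sketch of the counter idiom; note the sketch compares the old value plus count, whereas the member introduced by this patch compares the old value plus 1 against height():
\code
#include <atomic>

struct tower_node {
    unsigned const        height;        // number of levels the node participates in
    std::atomic<unsigned> unlinked{ 0 }; // levels already unlinked (or never linked)

    explicit tower_node( unsigned h ) : height( h ) {}

    // count == 1 for a single spliced-out level; the fast-erase and
    // insert-interrupted paths pass the number of levels they account for
    bool level_unlinked( unsigned count = 1 )
    {
        return unlinked.fetch_add( count, std::memory_order_relaxed ) + count == height;
    }
};
\endcode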
+ bool find_min_position( position& pos ) + { + assert( gc::is_locked() ); - Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; - RCU \p synchronize method can be called. RCU should NOT be locked. - The function does not call the disposer for the item found. - The disposer will be implicitly invoked when the returned object is destroyed or when - its \p release() member function is called. - Example: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... + retry: + pPred = m_Head.head(); - typename skip_list::exempt_ptr ep( theList.extract( 5 )); - if ( ep ) { - // Deal with ep - //... + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { - // Dispose returned item. - ep.release(); - } - \endcode - */ - template - exempt_ptr extract( Q const& key ) - { - return exempt_ptr( do_extract( key )); - } + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + // pCur.bits() means that pPred is logically deleted + // head cannot be deleted + assert( pCur.bits() == 0 ); - /// Extracts the item from the set with comparing functor \p pred - /** - The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. - */ - template - exempt_ptr extract_with( Q const& key, Less pred ) - { - return exempt_ptr( do_extract_with( key, pred )); - } + if ( pCur.ptr() ) { - /// Extracts an item with minimal key from the list - /** - The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. - If the skip-list is empty the function returns an empty \p exempt_ptr. + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); - RCU \p synchronize method can be called. RCU should NOT be locked. - The function does not call the disposer for the item found. - The disposer will be implicitly invoked when the returned object is destroyed or when - its \p release() member function is manually called. - Example: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) + goto retry; - typename skip_list::exempt_ptr ep(theList.extract_min()); - if ( ep ) { - // Deal with ep - //... + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + } - // Dispose returned item. - ep.release(); + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); } - \endcode + return ( pos.pCur = pCur.ptr() ) != nullptr; + } - @note Due the concurrent nature of the list, the function extracts nearly minimum key. - It means that the function gets leftmost item and tries to unlink it. - During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. - So, the function returns the item with minimum key at the moment of list traversing. 
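Editor's note: find_min_position() above simply records the first reachable node at every level, which is why the removed note speaks of a "nearly minimum" key under concurrency. extract_min() built on top of it is also what clear() uses; a drain loop in that style, assuming the skip_list typedef from the previous sketch and a hypothetical consumer process():
\code
void drain( skip_list& s )
{
    skip_list::exempt_ptr ep;
    while (( ep = s.extract_min() )) {  // RCU must NOT be locked around this call
        process( *ep );                 // process() is a hypothetical consumer of the item
        ep.release();                   // hand the node to the disposer
    }
}
\endcode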
- */ - exempt_ptr extract_min() + bool find_max_position( position& pos ) { - return exempt_ptr( do_extract_min()); - } + assert( gc::is_locked() ); - /// Extracts an item with maximal key from the list - /** - The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. - If the skip-list is empty the function returns an empty \p exempt_ptr. + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; - RCU \p synchronize method can be called. RCU should NOT be locked. - The function does not call the disposer for the item found. - The disposer will be implicitly invoked when the returned object is destroyed or when - its \p release() member function is manually called. - Example: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... + retry: + pPred = m_Head.head(); - typename skip_list::exempt_ptr ep( theList.extract_max()); - if ( ep ) { - // Deal with ep - //... - // Dispose returned item. - ep.release(); + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + else { + if ( !pSucc.ptr() ) + break; + + pPred = pCur.ptr(); + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); } - \endcode - @note Due the concurrent nature of the list, the function extracts nearly maximal key. - It means that the function gets rightmost item and tries to unlink it. - During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. - So, the function returns the item with maximum key at the moment of list traversing. - */ - exempt_ptr extract_max() - { - return exempt_ptr( do_extract_max()); + return ( pos.pCur = pCur.ptr() ) != nullptr; } - /// Deletes the item from the set - /** \anchor cds_intrusive_SkipListSet_rcu_erase - The function searches an item with key equal to \p key in the set, - unlinks it from the set, and returns \p true. - If the item with key equal to \p key is not found the function return \p false. + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + assert( gc::is_locked() ); - Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + unsigned int const nHeight = pNode->height(); + pNode->clear_tower(); - RCU \p synchronize method can be called. RCU should not be locked. 
- */ - template - bool erase( const Q& key ) - { - return do_erase( key, key_comparator(), [](value_type const&) {} ); + // Insert at level 0 + { + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_relaxed ); + if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { + return false; + } + + f( val ); + } + + // Insert at level 1..max + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + marked_node_ptr p; + while ( true ) { + marked_node_ptr pSucc( pos.pSucc[nLevel] ); + + // Set pNode->next + // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now + if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + // pNode has been marked as removed while we are inserting it + // Stop inserting + assert( p.bits() != 0 ); + + if ( pNode->level_unlinked( nHeight - nLevel ) && p.bits() == 1 ) { + pos.dispose( pNode ); + m_Stat.onEraseWhileInsert(); + } + else + m_Stat.onLogicDeleteWhileInsert(); + + return true; + } + p = pSucc; + + // Link pNode into the list at nLevel + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // go to next level + break; + } + + // Renew insert position + m_Stat.onRenewInsertPosition(); + if ( !find_position( val, pos, key_comparator(), false ) ) { + // The node has been deleted while we are inserting it + m_Stat.onNotFoundWhileInsert(); + return true; + } + } + } + return true; } - /// Delete the item from the set with comparing functor \p pred - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" - but \p pred predicate is used for key comparing. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. - */ - template - bool erase_with( const Q& key, Less pred ) + template + bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) { - CDS_UNUSED( pred ); - return do_erase( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); - } + assert( pDel != nullptr ); + assert( gc::is_locked() ); - /// Deletes the item from the set - /** \anchor cds_intrusive_SkipListSet_rcu_erase_func - The function searches an item with key equal to \p key in the set, - call \p f functor with item found, unlinks it from the set, and returns \p true. - The \ref disposer specified in \p Traits class template parameter is called - by garbage collector \p GC asynchronously. + marked_node_ptr pSucc; + back_off bkoff; - The \p Func interface is - \code - struct functor { - void operator()( value_type const& item ); - }; - \endcode - If the item with key equal to \p key is not found the function return \p false. + unsigned const nMask = bExtract ? 3u : 1u; - Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
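Editor's note: the Func interface quoted just above (a functor taking value_type const&) lets a caller observe the item at the moment it is erased, before the disposer reclaims it asynchronously. A small sketch, with skip_list and foo assumed as in the earlier examples and nKey an assumed key field:
\code
int erase_and_record( skip_list& s )
{
    int erased_key = -1;
    // The functor sees the item while it is still linked; the node itself is
    // reclaimed later, asynchronously, by the disposer via the RCU garbage collector.
    bool removed = s.erase( 5, [&erased_key]( foo const& v ) {
        erased_key = v.nKey;            // nKey: assumed key field of foo
    });
    return removed ? erased_key : -1;
}
\endcode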
+ // logical deletion (marking) + for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pSucc.bits() == 0 ) { + bkoff.reset(); + while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask, + memory_model::memory_order_release, atomics::memory_order_acquire )) + { + if ( pSucc.bits() == 0 ) { + bkoff(); + m_Stat.onMarkFailed(); + } + else if ( pSucc.bits() != nMask ) + return false; + } + } + } - RCU \p synchronize method can be called. RCU should not be locked. - */ - template - bool erase( Q const& key, Func f ) - { - return do_erase( key, key_comparator(), f ); + marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() ); + while ( true ) { + if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire )) + { + f( *node_traits::to_value_ptr( pDel ) ); + + // physical deletion + // try fast erase + p = pDel; + unsigned nCount = 0; + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) ) + { + // Do slow erase + if ( nCount ) { + if ( pDel->level_unlinked( nCount )) { + if ( p.bits() == 1 ) { + pos.dispose( pDel ); + m_Stat.onFastEraseHelped(); + } + else + m_Stat.onFastExtractHelped(); + return true; + } + } + + // Make slow erase + find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); + if ( bExtract ) + m_Stat.onSlowExtract(); + else + m_Stat.onSlowErase(); + + return true; + } + ++nCount; + } + + if ( !bExtract ) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + pos.dispose( pDel ); + m_Stat.onFastErase(); + } + else + m_Stat.onFastExtract(); + return true; + } + else if ( p.bits() ) { + // Another thread is deleting pDel right now + return false; + } + + m_Stat.onEraseRetry(); + bkoff(); + } } - /// Delete the item from the set with comparing functor \p pred - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" - but \p pred predicate is used for key comparing. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. - */ - template - bool erase_with( Q const& key, Less pred, Func f ) + enum finsd_fastpath_result { + find_fastpath_found, + find_fastpath_not_found, + find_fastpath_abort + }; + template + finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const { - CDS_UNUSED( pred ); - return do_erase( key, cds::opt::details::make_comparator_from_less(), f ); - } + node_type * pPred; + marked_node_ptr pCur; + marked_node_ptr pSucc; + marked_node_ptr pNull; - /// Finds \p key - /** @anchor cds_intrusive_SkipListSet_rcu_find_func - The function searches the item with key equal to \p key and calls the functor \p f for item found. - The interface of \p Func functor is: - \code - struct functor { - void operator()( value_type& item, Q& key ); - }; - \endcode - where \p item is the item found, \p key is the find function argument. + back_off bkoff; + unsigned attempt = 0; - The functor can change non-key fields of \p item. 
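Editor's note, stepping back to try_remove_at() above: once the level-0 link is marked, the erasing thread and any helpers race to unlink the tower, and level_unlinked() decides which of them finally disposes the node - whoever accounts for the last remaining level wins. A stand-alone model of that counting scheme, restricted to single-level increments; the names are illustrative:
\code
#include <atomic>
#include <cassert>

// Whoever unlinks the last remaining level of the tower may dispose the node.
struct tower {
    unsigned height;
    std::atomic<unsigned> unlinked;

    explicit tower( unsigned h ): height( h ), unlinked( 0 ) {}

    bool level_unlinked() {
        return unlinked.fetch_add( 1, std::memory_order_relaxed ) + 1 == height;
    }
};

int main()
{
    tower t( 3 );
    assert( !t.level_unlinked());  // level 2 unlinked, e.g. by a helping find_position()
    assert( !t.level_unlinked());  // level 1 unlinked by the erasing thread
    assert(  t.level_unlinked());  // level 0 - last level gone, safe to dispose the node
}
\endcode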
Note that the functor is only guarantee - that \p item cannot be disposed during functor is executing. - The functor does not serialize simultaneous access to the set \p item. If such access is - possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + try_again: + pPred = m_Head.head(); + for ( int nLevel = static_cast( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur == pNull ) + continue; - The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor - can modify both arguments. + while ( pCur != pNull ) { + if ( pCur.bits() ) { + // pPrev is being removed + if ( ++attempt < 4 ) { + bkoff(); + goto try_again; + } - The function applies RCU lock internally. + return find_fastpath_abort; + } - The function returns \p true if \p key is found, \p false otherwise. - */ - template - bool find( Q& key, Func f ) + if ( pCur.ptr() ) { + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + } + else if ( nCmp == 0 ) { + // found + f( *node_traits::to_value_ptr( pCur.ptr() ), val ); + return find_fastpath_found; + } + else // pCur > val - go down + break; + } + } + } + + return find_fastpath_not_found; + } + + template + bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) { - return do_find_with( key, key_comparator(), f ); + if ( find_position( val, pos, cmp, true ) ) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + f( *node_traits::to_value_ptr( pos.pCur ), val ); + return true; + } + else + return false; } - //@cond - template - bool find( Q const& key, Func f ) + + template + bool do_find_with( Q& val, Compare cmp, Func f ) { - return do_find_with( key, key_comparator(), f ); + position pos; + return do_find_with( val, cmp, f, pos ); } - //@endcond - /// Finds the key \p key with comparing functor \p pred - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" - but \p cmp is used for key comparison. - \p Less functor has the interface like \p std::less. - \p cmp must imply the same element order as the comparator used for building the set. - */ - template - bool find_with( Q& key, Less pred, Func f ) + template + bool do_find_with( Q& val, Compare cmp, Func f, position& pos ) { - CDS_UNUSED( pred ); - return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + bool bRet; + + { + rcu_lock l; + + switch ( find_fastpath( val, cmp, f ) ) { + case find_fastpath_found: + m_Stat.onFindFastSuccess(); + return true; + case find_fastpath_not_found: + m_Stat.onFindFastFailed(); + return false; + default: + break; + } + + if ( find_slowpath( val, cmp, f, pos ) ) { + m_Stat.onFindSlowSuccess(); + bRet = true; + } + else { + m_Stat.onFindSlowFailed(); + bRet = false; + } + } + return bRet; } - //@cond - template - bool find_with( Q const& key, Less pred, Func f ) + + template + bool do_erase( Q const& val, Compare cmp, Func f ) { - CDS_UNUSED( pred ); - return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); - } - //@endcond + check_deadlock_policy::check(); - /// Checks whether the set contains \p key - /** - The function searches the item with key equal to \p key - and returns \p true if it is found, and \p false otherwise. 
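Editor's note: both contains() and the functor form of find() funnel into do_find_with() above, so the fast-path/slow-path split (and the four-attempt abort in find_fastpath()) is invisible to callers. A lookup that copies a non-key field out of the found item, with skip_list and foo assumed as before and nData an assumed non-key field:
\code
int read_payload( skip_list& s )
{
    int payload = -1;
    // The functor runs under the RCU read-side lock that find() takes internally,
    // so the item cannot be disposed while the functor executes.
    s.find( 5, [&payload]( foo& item, int const& /*key*/ ) {
        payload = item.nData;           // nData: assumed non-key field of foo
    });
    return payload;                     // -1 if the key was not found
}
\endcode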
+ position pos; + bool bRet; - The function applies RCU lock internally. - */ - template - bool contains( Q const& key ) - { - return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} ); - } - //@cond - template - CDS_DEPRECATED("deprecated, use contains()") - bool find( Q const& key ) - { - return contains( key ); - } - //@endcond + { + rcu_lock rcuLock; - /// Checks whether the set contains \p key using \p pred predicate for searching - /** - The function is similar to contains( key ) but \p pred is used for key comparing. - \p Less functor has the interface like \p std::less. - \p Less must imply the same element order as the comparator used for building the set. - */ - template - bool contains( Q const& key, Less pred ) - { - CDS_UNUSED( pred ); - return do_find_with( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + if ( !find_position( val, pos, cmp, false ) ) { + m_Stat.onEraseFailed(); + bRet = false; + } + else { + node_type * pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, f, false ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onEraseSuccess(); + bRet = true; + } + else { + m_Stat.onEraseFailed(); + bRet = false; + } + } + } + + return bRet; } - //@cond - template - CDS_DEPRECATED("deprecated, use contains()") - bool find_with( Q const& key, Less pred ) + + template + value_type * do_extract_key( Q const& key, Compare cmp, position& pos ) { - return contains( key, pred ); - } - //@endcond + // RCU should be locked!!! + assert( gc::is_locked() ); - /// Finds \p key and return the item found - /** \anchor cds_intrusive_SkipListSet_rcu_get - The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. - If \p key is not found it returns empty \p raw_ptr. + node_type * pDel; - Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + if ( !find_position( key, pos, cmp, false ) ) { + m_Stat.onExtractFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); - RCU should be locked before call of this function. - Returned item is valid only while RCU is locked: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... - typename skip_list::raw_ptr pVal; - { - // Lock RCU - skip_list::rcu_lock lock; + unsigned int const nHeight = pDel->height(); - pVal = theList.get( 5 ); - if ( pVal ) { - // Deal with pVal - //... + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractSuccess(); + } + else { + m_Stat.onExtractFailed(); + pDel = nullptr; } } - // You can manually release pVal after RCU-locked section - pVal.release(); - \endcode - */ + + return pDel ? 
node_traits::to_value_ptr( pDel ) : nullptr; + } + template - raw_ptr get( Q const& key ) + value_type * do_extract( Q const& key ) { - assert( gc::is_locked()); - + check_deadlock_policy::check(); + value_type * pDel = nullptr; position pos; - value_type * pFound; - if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) - return raw_ptr( pFound, raw_ptr_disposer( pos )); - return raw_ptr( raw_ptr_disposer( pos )); - } + { + rcu_lock l; + pDel = do_extract_key( key, key_comparator(), pos ); + } - /// Finds \p key and return the item found - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" - but \p pred is used for comparing the keys. + return pDel; + } - \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q - in any order. - \p pred must imply the same element order as the comparator used for building the set. - */ template - raw_ptr get_with( Q const& key, Less pred ) + value_type * do_extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); - assert( gc::is_locked()); - - value_type * pFound = nullptr; + check_deadlock_policy::check(); + value_type * pDel = nullptr; position pos; - if ( do_find_with( key, cds::opt::details::make_comparator_from_less(), - [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) { - return raw_ptr( pFound, raw_ptr_disposer( pos )); + rcu_lock l; + pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less(), pos ); } - return raw_ptr( raw_ptr_disposer( pos )); - } - /// Returns item count in the set - /** - The value returned depends on item counter type provided by \p Traits template parameter. - For \p atomicity::empty_item_counter the function always returns 0. - Therefore, the function is not suitable for checking the set emptiness, use \p empty() - member function for this purpose. - */ - size_t size() const - { - return m_ItemCounter; + return pDel; } - /// Checks if the set is empty - bool empty() const + value_type * do_extract_min() { - return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; - } + assert( !gc::is_locked() ); - /// Clears the set (not atomic) - /** - The function unlink all items from the set. - The function is not atomic, thus, in multi-threaded environment with parallel insertions - this sequence - \code - set.clear(); - assert( set.empty()); - \endcode - the assertion could be raised. + position pos; + node_type * pDel; - For each item the \p disposer will be called automatically after unlinking. - */ - void clear() - { - exempt_ptr ep; - while ( (ep = extract_min())); + { + rcu_lock l; + + if ( !find_min_position( pos ) ) { + m_Stat.onExtractMinFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMinSuccess(); + } + else { + m_Stat.onExtractMinFailed(); + pDel = nullptr; + } + } + } + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; } - /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. 
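Editor's note: one property all the do_extract_* helpers above enforce (check_deadlock_policy::check() and assert(!gc::is_locked())) is that, unlike get(), the extract family must be called with RCU unlocked. A sketch of correct usage, with the skip_list typedef assumed as before:
\code
void take_five( skip_list& s )
{
    // Correct: no rcu_lock is held around extract().
    skip_list::exempt_ptr ep = s.extract( 5 );
    if ( ep ) {
        // ... use *ep; the item stays valid while ep owns it ...
        ep.release();   // invokes the disposer for the node
    }

    // Incorrect (rejected by the deadlock check / is_locked() assertion):
    // calling s.extract() inside a { skip_list::rcu_lock l; ... } block.
}
\endcode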
- static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + value_type * do_extract_max() { - return c_nMaxHeight; + assert( !gc::is_locked() ); + + position pos; + node_type * pDel; + + { + rcu_lock l; + + if ( !find_max_position( pos ) ) { + m_Stat.onExtractMaxFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMaxSuccess(); + } + else { + m_Stat.onExtractMaxFailed(); + pDel = nullptr; + } + } + } + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; } - /// Returns const reference to internal statistics - stat const& statistics() const + void increase_height( unsigned int nHeight ) { - return m_Stat; + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + if ( nCur < nHeight ) + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); } + //@endcond }; }} // namespace cds::intrusive diff --git a/test/include/cds_test/stat_skiplist_out.h b/test/include/cds_test/stat_skiplist_out.h index 8e5b22e4..69f3894c 100644 --- a/test/include/cds_test/stat_skiplist_out.h +++ b/test/include/cds_test/stat_skiplist_out.h @@ -75,13 +75,17 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nFindSlowFailed ) << CDSSTRESS_STAT_OUT( s, m_nRenewInsertPosition ) << CDSSTRESS_STAT_OUT( s, m_nLogicDeleteWhileInsert ) + << CDSSTRESS_STAT_OUT( s, m_nEraseWhileInsert ) << CDSSTRESS_STAT_OUT( s, m_nNotFoundWhileInsert ) << CDSSTRESS_STAT_OUT( s, m_nFastErase ) + << CDSSTRESS_STAT_OUT( s, m_nFastEraseHelped ) << CDSSTRESS_STAT_OUT( s, m_nSlowErase ) << CDSSTRESS_STAT_OUT( s, m_nFastExtract ) + << CDSSTRESS_STAT_OUT( s, m_nFastExtractHelped ) << CDSSTRESS_STAT_OUT( s, m_nSlowExtract ) << CDSSTRESS_STAT_OUT( s, m_nEraseWhileFind ) - << CDSSTRESS_STAT_OUT( s, m_nExtractWhileFind ); + << CDSSTRESS_STAT_OUT( s, m_nExtractWhileFind ) + << CDSSTRESS_STAT_OUT( s, m_nMarkFailed ); } } // namespace cds_test -- 2.34.1