X-Git-Url: http://plrg.eecs.uci.edu/git/?p=libcds.git;a=blobdiff_plain;f=cds%2Fintrusive%2Fskip_list_rcu.h;h=79d9f5b2fe7b3ec156e453b17c9f7cf75813137a;hp=108b90b37698040c3aef15c2cdb5e56bf8856fa2;hb=8277dbfa3fae52996a2c50499cde16a10f9c188c;hpb=c12a10aa9e94d2577895e87ffa2ef6bef8b4807b diff --git a/cds/intrusive/skip_list_rcu.h b/cds/intrusive/skip_list_rcu.h index 108b90b3..79d9f5b2 100644 --- a/cds/intrusive/skip_list_rcu.h +++ b/cds/intrusive/skip_list_rcu.h @@ -1,4 +1,32 @@ -//$$CDS-header$$ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H @@ -36,34 +64,21 @@ namespace cds { namespace intrusive { atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) public: node * m_pDelChain; ///< Deleted node chain (local for a thread) -# ifdef _DEBUG - bool volatile m_bLinked; - bool volatile m_bUnlinked; -# endif protected: unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p nullptr + atomics::atomic m_nUnlink; ///< How many levels has been unlinked public: /// Constructs a node of height 1 (a bottom-list node) CDS_CONSTEXPR node() : m_pNext( nullptr ) , m_pDelChain( nullptr ) -# ifdef _DEBUG - , m_bLinked( false ) - , m_bUnlinked( false ) -# endif , m_nHeight(1) , m_arrNext( nullptr ) + , m_nUnlink(0) {} -# ifdef _DEBUG - ~node() - { - assert( !m_bLinked || m_bUnlinked ); - } -# endif - /// Constructs a node of height \p nHeight void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { @@ -98,8 +113,8 @@ namespace cds { namespace intrusive { /// Access to element of next pointer array atomic_marked_ptr& next( unsigned int nLevel ) { - assert( nLevel < height() ); - assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) ); + assert( nLevel < height()); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); # ifdef CDS_THREAD_SANITIZER_ENABLED // TSan false positive: m_arrNext is read-only array @@ -115,7 +130,7 @@ namespace cds { namespace intrusive { /// Access to element of next pointer array (const version) atomic_marked_ptr const& next( unsigned int nLevel ) const { - assert( nLevel < height() ); + assert( nLevel < height()); assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); # ifdef CDS_THREAD_SANITIZER_ENABLED @@ -161,6 +176,11 @@ namespace cds { namespace intrusive { && m_arrNext == nullptr && m_nHeight <= 1; } + + bool level_unlinked( unsigned nCount = 1 ) + { + return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height(); + } }; } // namespace skip_list //@endcond @@ -190,13 +210,10 @@ namespace cds { namespace intrusive { protected: void next() { - // RCU should be locked before iterating!!! - assert( gc::is_locked() ); - back_off bkoff; for (;;) { - if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { // Current node is marked as deleted. So, its next pointer can point to anything // In this case we interrupt our iteration and returns end() iterator. *this = iterator(); @@ -205,12 +222,12 @@ namespace cds { namespace intrusive { marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed ); node_type * pp = p.ptr(); - if ( p.bits() ) { + if ( p.bits()) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; } - else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) { + else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; @@ -225,21 +242,18 @@ namespace cds { namespace intrusive { iterator( node_type& refHead ) : m_pNode( nullptr ) { - // RCU should be locked before iterating!!! - assert( gc::is_locked() ); - back_off bkoff; for (;;) { marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed ); - if ( !p.ptr() ) { + if ( !p.ptr()) { // empty skip-list break; } node_type * pp = p.ptr(); // Logically deleted node is marked from highest level - if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) { + if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { m_pNode = pp; break; } @@ -251,17 +265,11 @@ namespace cds { namespace intrusive { public: iterator() : m_pNode( nullptr ) - { - // RCU should be locked before iterating!!! 
- assert( gc::is_locked() ); - } + {} iterator( iterator const& s) : m_pNode( s.m_pNode ) - { - // RCU should be locked before iterating!!! - assert( gc::is_locked() ); - } + {} value_type * operator ->() const { @@ -571,7 +579,7 @@ namespace cds { namespace intrusive { { assert( pVal ); - typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) ); + typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal)); disposer()( pVal ); } @@ -586,7 +594,7 @@ namespace cds { namespace intrusive { static void dispose_chain( node_type * pChain ) { if ( pChain ) { - assert( !gc::is_locked() ); + assert( !gc::is_locked()); auto f = [&pChain]() -> cds::urcu::retired_ptr { node_type * p = pChain; @@ -673,451 +681,284 @@ namespace cds { namespace intrusive { /// Result of \p get(), \p get_with() functions - pointer to the node found typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr; - protected: - //@cond - - bool is_extracted( marked_node_ptr const p ) const + public: + /// Default constructor + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + , m_pDeferredDelChain( nullptr ) { - return (p.bits() & 2) != 0; + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + // Barrier for head node + atomics::atomic_thread_fence( memory_model::memory_order_release ); } - template - bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + /// Clears and destructs the skip-list + ~SkipListSet() { - assert( gc::is_locked() ); - - node_type * pPred; - marked_node_ptr pSucc; - marked_node_ptr pCur; - int nCmp = 1; - - retry: - pPred = m_Head.head(); - - for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { - - while ( true ) { - pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); - if ( pCur.bits() ) { - // pCur.bits() means that pPred is logically deleted - goto retry; - } - - if ( pCur.ptr() == nullptr ) { - // end of the list at level nLevel - goto next level - break; - } - - // pSucc contains deletion mark for pCur - pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + clear(); + } - if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) - goto retry; + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList - if ( pSucc.bits() ) { - // pCur is marked, i.e. logically deleted. - marked_node_ptr p( pCur.ptr() ); -# ifdef _DEBUG - if ( nLevel == 0 ) - pCur->m_bUnlinked = true; -# endif - if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - if ( nLevel == 0 ) { - if ( !is_extracted( pSucc )) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pCur.ptr() ); - m_Stat.onEraseWhileFind(); - } - else { - m_Stat.onExtractWhileFind(); - } - } - } - goto retry; - } - else { - nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); - if ( nCmp < 0 ) - pPred = pCur.ptr(); - else if ( nCmp == 0 && bStopIfFound ) - goto found; - else - break; - } - } + You may safely use iterators in multi-threaded environment only under RCU lock. 
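            A minimal traversal sketch, reusing the illustrative \p foo item type and \p my_traits from the
            other examples in this header (both are assumptions, not definitions made here):
            \code
            typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
            skip_list theList;
            // ...
            {
                // Keep the whole traversal inside one RCU-locked section
                skip_list::rcu_lock lock;
                for ( skip_list::iterator it = theList.begin(), itEnd = theList.end(); it != itEnd; ++it ) {
                    // *it is protected from reclamation while the lock is held
                    // ... read or update non-key fields of *it here
                }
            }
            \endcode
            The iterator must not be used outside of the RCU-locked section.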
+ Otherwise, a crash is possible if another thread deletes the element the iterator points to. + */ + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; - // Next level - pos.pPrev[ nLevel ] = pPred; - pos.pSucc[ nLevel ] = pCur.ptr(); - } + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; - if ( nCmp != 0 ) - return false; + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head()); + } - found: - pos.pCur = pCur.ptr(); - return pCur.ptr() && nCmp == 0; + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( *m_Head.head()); } - bool find_min_position( position& pos ) + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const { - assert( gc::is_locked() ); + return const_iterator( *m_Head.head()); + } - node_type * pPred; - marked_node_ptr pSucc; - marked_node_ptr pCur; + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } - retry: - pPred = m_Head.head(); + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator(); + } - for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator(); + } + //@} - pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); - // pCur.bits() means that pPred is logically deleted - // head cannot be deleted - assert( pCur.bits() == 0 ); + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. - if ( pCur.ptr() ) { + The function applies RCU lock internally. - // pSucc contains deletion mark for pCur - pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } - if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) - goto retry; + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. - if ( pSucc.bits() ) { - // pCur is marked, i.e. logically deleted. -# ifdef _DEBUG - if ( nLevel == 0 ) - pCur->m_bUnlinked = true; -# endif - marked_node_ptr p( pCur.ptr() ); - if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - if ( nLevel == 0 ) { - if ( !is_extracted( pSucc )) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pCur.ptr() ); - m_Stat.onEraseWhileFind(); - } - else { - m_Stat.onExtractWhileFind(); - } - } - } - goto retry; - } - } + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. 
- // Next level - pos.pPrev[ nLevel ] = pPred; - pos.pSucc[ nLevel ] = pCur.ptr(); - } - return (pos.pCur = pCur.ptr()) != nullptr; - } + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. - bool find_max_position( position& pos ) + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( value_type& val, Func f ) { - assert( gc::is_locked() ); + check_deadlock_policy::check(); - node_type * pPred; - marked_node_ptr pSucc; - marked_node_ptr pCur; + position pos; + bool bRet; - retry: - pPred = m_Head.head(); + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; - for ( int nLevel = static_cast(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) { + rcu_lock rcuLock; - while ( true ) { - pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); - if ( pCur.bits() ) { - // pCur.bits() means that pPred is logically deleted - goto retry; - } + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); - if ( pCur.ptr() == nullptr ) { - // end of the list at level nLevel - goto next level + m_Stat.onInsertFailed(); + bRet = false; break; } - // pSucc contains deletion mark for pCur - pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); - - if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) - goto retry; - - if ( pSucc.bits() ) { - // pCur is marked, i.e. logically deleted. -# ifdef _DEBUG - if ( nLevel == 0 ) - pCur->m_bUnlinked = true; -# endif - marked_node_ptr p( pCur.ptr() ); - if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - if ( nLevel == 0 ) { - if ( !is_extracted( pSucc )) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pCur.ptr() ); - m_Stat.onEraseWhileFind(); - } - else { - m_Stat.onExtractWhileFind(); - } - } - } - goto retry; + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; } - else { - if ( !pSucc.ptr() ) - break; - pPred = pCur.ptr(); + if ( !insert_at_position( val, pNode, pos, f )) { + m_Stat.onInsertRetry(); + continue; } - } - // Next level - pos.pPrev[ nLevel ] = pPred; - pos.pSucc[ nLevel ] = pCur.ptr(); + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + bRet = true; + break; + } } - return (pos.pCur = pCur.ptr()) != nullptr; + return bRet; } - template - bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) - { - assert( gc::is_locked() ); + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. - unsigned int nHeight = pNode->height(); - pNode->clear_tower(); + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bInsert is \p true. 
+ Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. - { - marked_node_ptr p( pos.pSucc[0] ); - pNode->next( 0 ).store( p, memory_model::memory_order_release ); - if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) { - return false; - } -# ifdef _DEBUG - pNode->m_bLinked = true; -# endif - f( val ); - } + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. - for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { - marked_node_ptr p; - while ( true ) { - marked_node_ptr q( pos.pSucc[ nLevel ]); - if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { - // pNode has been marked as removed while we are inserting it - // Stop inserting - assert( p.bits() ); - m_Stat.onLogicDeleteWhileInsert(); - return true; - } - p = q; - if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) - break; + RCU \p synchronize method can be called. RCU should not be locked. - // Renew insert position - m_Stat.onRenewInsertPosition(); - if ( !find_position( val, pos, key_comparator(), false )) { - // The node has been deleted while we are inserting it - m_Stat.onNotFoundWhileInsert(); - return true; - } - } - } - return true; - } + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ template - bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) + std::pair update( value_type& val, Func func, bool bInsert = true ) { - assert( pDel != nullptr ); - assert( gc::is_locked() ); - - marked_node_ptr pSucc; + check_deadlock_policy::check(); - // logical deletion (marking) - for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { - pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed ); - while ( true ) { - if ( pSucc.bits() - || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) - { - break; - } - } - } + position pos; + std::pair bRet( true, false ); - pSucc = pDel->next(0).load( memory_model::memory_order_relaxed ); - while ( true ) { - if ( pSucc.bits() ) - return false; + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; - int const nMask = bExtract ? 
3 : 1; - if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + rcu_lock rcuLock; + while ( true ) { - f( *node_traits::to_value_ptr( pDel )); + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); - // physical deletion - // try fast erase - pSucc = pDel; - for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { - if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc, - marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ), - memory_model::memory_order_release, atomics::memory_order_relaxed) ) - { - // Do slow erase - find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false ); - if ( bExtract ) - m_Stat.onSlowExtract(); - else - m_Stat.onSlowErase(); -# ifdef _DEBUG - assert( pDel->m_bUnlinked ); -# endif - return true; - } + func( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onUpdateExist(); + break; } -# ifdef _DEBUG - pDel->m_bUnlinked = true; -# endif - if ( !bExtract ) { - // We cannot free the node at this moment since RCU is locked - // Link deleted nodes to a chain to free later - pos.dispose( pDel ); - m_Stat.onFastErase(); + if ( !bInsert ) { + scp.release(); + bRet.first = false; + break; } - else - m_Stat.onFastExtract(); - return true; - } - m_Stat.onEraseRetry(); - } - } - - enum finsd_fastpath_result { - find_fastpath_found, - find_fastpath_not_found, - find_fastpath_abort - }; - template - finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const - { - node_type * pPred; - marked_node_ptr pCur; - marked_node_ptr pSucc; - marked_node_ptr pNull; - - back_off bkoff; - - pPred = m_Head.head(); - for ( int nLevel = static_cast(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) { - pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire ); - if ( pCur == pNull ) - continue; - - while ( pCur != pNull ) { - if ( pCur.bits() ) { - // Wait until pCur is removed - unsigned int nAttempt = 0; - while ( pCur.bits() && nAttempt++ < 16 ) { - bkoff(); - pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire ); - } - bkoff.reset(); - if ( pCur.bits() ) { - // Maybe, we are on deleted node sequence - // Abort searching, try slow-path - return find_fastpath_abort; - } + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; } - if ( pCur.ptr() ) { - int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); - if ( nCmp < 0 ) { - pPred = pCur.ptr(); - pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire ); - } - else if ( nCmp == 0 ) { - // found - f( *node_traits::to_value_ptr( pCur.ptr() ), val ); - return find_fastpath_found; - } - else // pCur > val - go down - break; + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { + m_Stat.onInsertRetry(); + continue; } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onUpdateNew(); + bRet.second = true; + break; } } - return find_fastpath_not_found; - } - - template - bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) - { - if ( find_position( val, pos, cmp, true )) { - assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); - - f( *node_traits::to_value_ptr( 
pos.pCur ), val ); - return true; - } - else - return false; + return bRet; } - - template - bool do_find_with( Q& val, Compare cmp, Func f ) + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) { - position pos; - return do_find_with( val, cmp, f, pos ); + return update( val, func, true ); } + //@endcond - template - bool do_find_with( Q& val, Compare cmp, Func f, position& pos ) - { - bool bRet; + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. - { - rcu_lock l; + Difference between \p erase() and \p %unlink() functions: \p erase() finds a key + and deletes the item found. \p %unlink() searches an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . - switch ( find_fastpath( val, cmp, f )) { - case find_fastpath_found: - m_Stat.onFindFastSuccess(); - return true; - case find_fastpath_not_found: - m_Stat.onFindFastFailed(); - return false; - default: - break; - } + RCU \p synchronize method can be called. RCU should not be locked. - if ( find_slowpath( val, cmp, f, pos )) { - m_Stat.onFindSlowSuccess(); - bRet = true; - } - else { - m_Stat.onFindSlowFailed(); - bRet = false; - } - } - return bRet; - } + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. - template - bool do_erase( Q const& val, Compare cmp, Func f ) + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) { check_deadlock_policy::check(); @@ -1125,25 +966,26 @@ namespace cds { namespace intrusive { bool bRet; { - rcu_lock rcuLock; + rcu_lock l; - if ( !find_position( val, pos, cmp, false ) ) { - m_Stat.onEraseFailed(); + if ( !find_position( val, pos, key_comparator(), false )) { + m_Stat.onUnlinkFailed(); bRet = false; } else { node_type * pDel = pos.pCur; - assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); unsigned int nHeight = pDel->height(); - if ( try_remove_at( pDel, pos, f, false )) { + + if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) { --m_ItemCounter; m_Stat.onRemoveNode( nHeight ); - m_Stat.onEraseSuccess(); + m_Stat.onUnlinkSuccess(); bRet = true; } else { - m_Stat.onEraseFailed(); + m_Stat.onUnlinkFailed(); bRet = false; } } @@ -1152,840 +994,1015 @@ namespace cds { namespace intrusive { return bRet; } - template - value_type * do_extract_key( Q const& key, Compare cmp, position& pos ) - { - // RCU should be locked!!! - assert( gc::is_locked() ); + /// Extracts the item from the set with specified \p key + /** \anchor cds_intrusive_SkipListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. - node_type * pDel; + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. - if ( !find_position( key, pos, cmp, false ) ) { - m_Stat.onExtractFailed(); - pDel = nullptr; - } - else { - pDel = pos.pCur; - assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); + RCU \p synchronize method can be called. 
RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... - unsigned int const nHeight = pDel->height(); + typename skip_list::exempt_ptr ep( theList.extract( 5 )); + if ( ep ) { + // Deal with ep + //... - if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onExtractSuccess(); - } - else { - m_Stat.onExtractFailed(); - pDel = nullptr; - } + // Dispose returned item. + ep.release(); } - - return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; - } - + \endcode + */ template - value_type * do_extract( Q const& key ) + exempt_ptr extract( Q const& key ) { - check_deadlock_policy::check(); - value_type * pDel = nullptr; - position pos; - { - rcu_lock l; - pDel = do_extract_key( key, key_comparator(), pos ); - } - - return pDel; + return exempt_ptr( do_extract( key )); } + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ template - value_type * do_extract_with( Q const& key, Less pred ) + exempt_ptr extract_with( Q const& key, Less pred ) { - CDS_UNUSED(pred); - check_deadlock_policy::check(); - value_type * pDel = nullptr; - position pos; - { - rcu_lock l; - pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less(), pos ); - } - - return pDel; + return exempt_ptr( do_extract_with( key, pred )); } - value_type * do_extract_min() - { - assert( !gc::is_locked() ); - - position pos; - node_type * pDel; + /// Extracts an item with minimal key from the list + /** + The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. - { - rcu_lock l; + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is manually called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... - if ( !find_min_position( pos ) ) { - m_Stat.onExtractMinFailed(); - pDel = nullptr; - } - else { - pDel = pos.pCur; - unsigned int const nHeight = pDel->height(); + typename skip_list::exempt_ptr ep(theList.extract_min()); + if ( ep ) { + // Deal with ep + //... - if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onExtractMinSuccess(); - } - else { - m_Stat.onExtractMinFailed(); - pDel = nullptr; - } - } + // Dispose returned item. + ep.release(); } + \endcode - return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; - } - - value_type * do_extract_max() + @note Due the concurrent nature of the list, the function extracts nearly minimum key. + It means that the function gets leftmost item and tries to unlink it. 
+ During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of list traversing. + */ + exempt_ptr extract_min() { - assert( !gc::is_locked() ); - - position pos; - node_type * pDel; + return exempt_ptr( do_extract_min()); + } - { - rcu_lock l; + /// Extracts an item with maximal key from the list + /** + The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. - if ( !find_max_position( pos ) ) { - m_Stat.onExtractMaxFailed(); - pDel = nullptr; - } - else { - pDel = pos.pCur; - unsigned int const nHeight = pDel->height(); + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is manually called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... - if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onExtractMaxSuccess(); - } - else { - m_Stat.onExtractMaxFailed(); - pDel = nullptr; - } - } + typename skip_list::exempt_ptr ep( theList.extract_max()); + if ( ep ) { + // Deal with ep + //... + // Dispose returned item. + ep.release(); } + \endcode - return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; - } - - void increase_height( unsigned int nHeight ) + @note Due the concurrent nature of the list, the function extracts nearly maximal key. + It means that the function gets rightmost item and tries to unlink it. + During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of list traversing. + */ + exempt_ptr extract_max() { - unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); - if ( nCur < nHeight ) - m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); + return exempt_ptr( do_extract_max()); } - //@endcond - public: - /// Default constructor - SkipListSet() - : m_Head( c_nMaxHeight ) - , m_nHeight( c_nMinHeight ) - , m_pDeferredDelChain( nullptr ) - { - static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. - // Barrier for head node - atomics::atomic_thread_fence( memory_model::memory_order_release ); - } + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. - /// Clears and destructs the skip-list - ~SkipListSet() + RCU \p synchronize method can be called. RCU should not be locked. 
+ */ + template + bool erase( const Q& key ) { - clear(); + return do_erase( key, key_comparator(), [](value_type const&) {} ); } - public: - /// Iterator type - typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; - - /// Const iterator type - typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; - - /// Returns a forward iterator addressing the first element in a set - iterator begin() + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& key, Less pred ) { - return iterator( *m_Head.head() ); + CDS_UNUSED( pred ); + return do_erase( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); } - /// Returns a forward const iterator addressing the first element in a set - const_iterator begin() const - { - return const_iterator( *m_Head.head() ); - } + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. - /// Returns a forward const iterator addressing the first element in a set - const_iterator cbegin() const - { - return const_iterator( *m_Head.head() ); - } + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + If the item with key equal to \p key is not found the function return \p false. - /// Returns a forward iterator that addresses the location succeeding the last element in a set. - iterator end() + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( Q const& key, Func f ) { - return iterator(); + return do_erase( key, key_comparator(), f ); } - /// Returns a forward const iterator that addresses the location succeeding the last element in a set. - const_iterator end() const + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) { - return const_iterator(); + CDS_UNUSED( pred ); + return do_erase( key, cds::opt::details::make_comparator_from_less(), f ); } - /// Returns a forward const iterator that addresses the location succeeding the last element in a set. - const_iterator cend() const - { - return const_iterator(); - } + /// Finds \p key + /** @anchor cds_intrusive_SkipListSet_rcu_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. - public: - /// Inserts new node - /** - The function inserts \p val in the set if it does not contain - an item with key equal to \p val. + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. The function applies RCU lock internally. - Returns \p true if \p val is placed into the set, \p false otherwise. + The function returns \p true if \p key is found, \p false otherwise. */ - bool insert( value_type& val ) + template + bool find( Q& key, Func f ) { - return insert( val, []( value_type& ) {} ); + return do_find_with( key, key_comparator(), f ); } + //@cond + template + bool find( Q const& key, Func f ) + { + return do_find_with( key, key_comparator(), f ); + } + //@endcond - /// Inserts new node + /// Finds the key \p key with comparing functor \p pred /** - This function is intended for derived non-intrusive containers. - - The function allows to split creating of new item into two part: - - create item with key only - - insert new item into the set - - if inserting is success, calls \p f functor to initialize value-field of \p val. - - The functor signature is: - \code - void func( value_type& val ); - \endcode - where \p val is the item inserted. User-defined functor \p f should guarantee that during changing - \p val no any other changes could be made on this set's item by concurrent threads. - The user-defined functor is called only if the inserting is success. - - RCU \p synchronize method can be called. RCU should not be locked. + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" + but \p cmp is used for key comparison. + \p Less functor has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. */ - template - bool insert( value_type& val, Func f ) + template + bool find_with( Q& key, Less pred, Func f ) { - check_deadlock_policy::check(); - - position pos; - bool bRet; + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond - { - node_type * pNode = node_traits::to_node_ptr( val ); - scoped_node_ptr scp( pNode ); - unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; - bool bTowerMade = false; + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. - rcu_lock rcuLock; + The function applies RCU lock internally. 
+ */ + template + bool contains( Q const& key ) + { + return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond - while ( true ) - { - bool bFound = find_position( val, pos, key_comparator(), true ); - if ( bFound ) { - // scoped_node_ptr deletes the node tower if we create it - if ( !bTowerMade ) - scp.release(); + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond - m_Stat.onInsertFailed(); - bRet = false; - break; - } + /// Finds \p key and return the item found + /** \anchor cds_intrusive_SkipListSet_rcu_get + The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. + If \p key is not found it returns empty \p raw_ptr. - if ( !bTowerOk ) { - build_node( pNode ); - nHeight = pNode->height(); - bTowerMade = - bTowerOk = true; - } + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. - if ( !insert_at_position( val, pNode, pos, f )) { - m_Stat.onInsertRetry(); - continue; - } + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + typename skip_list::raw_ptr pVal; + { + // Lock RCU + skip_list::rcu_lock lock; - increase_height( nHeight ); - ++m_ItemCounter; - m_Stat.onAddNode( nHeight ); - m_Stat.onInsertSuccess(); - scp.release(); - bRet = true; - break; + pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... } } + // You can manually release pVal after RCU-locked section + pVal.release(); + \endcode + */ + template + raw_ptr get( Q const& key ) + { + assert( gc::is_locked()); - return bRet; + position pos; + value_type * pFound; + if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) + return raw_ptr( pFound, raw_ptr_disposer( pos )); + return raw_ptr( raw_ptr_disposer( pos )); } - /// Updates the node + /// Finds \p key and return the item found /** - The operation performs inserting or changing data with lock-free manner. - - If the item \p val is not found in the set, then \p val is inserted into the set - iff \p bInsert is \p true. - Otherwise, the functor \p func is called with item found. - The functor signature is: - \code - void func( bool bNew, value_type& item, value_type& val ); - \endcode - with arguments: - - \p bNew - \p true if the item has been inserted, \p false otherwise - - \p item - item of the set - - \p val - argument \p val passed into the \p %update() function - If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments - refer to the same thing. 
- - The functor can change non-key fields of the \p item; however, \p func must guarantee - that during changing no any other modifications could be made on this item by concurrent threads. - - RCU \p synchronize method can be called. RCU should not be locked. - - Returns std::pair where \p first is \p true if operation is successfull, - i.e. the node has been inserted or updated, - \p second is \p true if new item has been added or \p false if the item with \p key - already exists. + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. - @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. */ - template - std::pair update( value_type& val, Func func, bool bInsert = true ) + template + raw_ptr get_with( Q const& key, Less pred ) { - check_deadlock_policy::check(); + CDS_UNUSED( pred ); + assert( gc::is_locked()); + value_type * pFound = nullptr; position pos; - std::pair bRet( true, false ); - + if ( do_find_with( key, cds::opt::details::make_comparator_from_less(), + [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) { - node_type * pNode = node_traits::to_node_ptr( val ); - scoped_node_ptr scp( pNode ); - unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; - bool bTowerMade = false; + return raw_ptr( pFound, raw_ptr_disposer( pos )); + } + return raw_ptr( raw_ptr_disposer( pos )); + } - rcu_lock rcuLock; - while ( true ) - { - bool bFound = find_position( val, pos, key_comparator(), true ); - if ( bFound ) { - // scoped_node_ptr deletes the node tower if we create it before - if ( !bTowerMade ) - scp.release(); - - func( false, *node_traits::to_value_ptr(pos.pCur), val ); - m_Stat.onUpdateExist(); - break; - } + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + For \p atomicity::empty_item_counter the function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } - if ( !bInsert ) { - scp.release(); - bRet.first = false; - break; - } + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; + } - if ( !bTowerOk ) { - build_node( pNode ); - nHeight = pNode->height(); - bTowerMade = - bTowerOk = true; - } + /// Clears the set (not atomic) + /** + The function unlink all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. - if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { - m_Stat.onInsertRetry(); - continue; - } + For each item the \p disposer will be called automatically after unlinking. + */ + void clear() + { + exempt_ptr ep; + while ( (ep = extract_min())); + } - increase_height( nHeight ); - ++m_ItemCounter; - scp.release(); - m_Stat.onAddNode( nHeight ); - m_Stat.onUpdateNew(); - bRet.second = true; - break; - } - } + /// Returns maximum height of skip-list. 
The max height is a constant for each object and does not exceed 32. + static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + { + return c_nMaxHeight; + } - return bRet; + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; } + + protected: //@cond - template - CDS_DEPRECATED("ensure() is deprecated, use update()") - std::pair ensure( value_type& val, Func func ) + + bool is_extracted( marked_node_ptr const p ) const { - return update( val, func, true ); + return ( p.bits() & 2 ) != 0; } - //@endcond - /// Unlinks the item \p val from the set - /** - The function searches the item \p val in the set and unlink it from the set - if it is found and is equal to \p val. + void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos ) + { + marked_node_ptr p( pCur.ptr() ); + if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), + memory_model::memory_order_release, atomics::memory_order_relaxed ) ) + { + if ( pCur->level_unlinked()) { + if ( !is_extracted( pSucc ) ) { + // We cannot free the node at this moment because RCU is locked + // Link deleted nodes to a chain to free later + pos.dispose( pCur.ptr() ); + m_Stat.onEraseWhileFind(); + } + else + m_Stat.onExtractWhileFind(); + } + } + } - Difference between \p erase() and \p %unlink() functions: \p erase() finds a key - and deletes the item found. \p %unlink() searches an item by key and deletes it - only if \p val is an item of that set, i.e. the pointer to item found - is equal to &val . + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + { + assert( gc::is_locked() ); - RCU \p synchronize method can be called. RCU should not be locked. + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + int nCmp = 1; - The \ref disposer specified in \p Traits class template parameter is called - by garbage collector \p GC asynchronously. + retry: + pPred = m_Head.head(); - The function returns \p true if success and \p false otherwise. - */ - bool unlink( value_type& val ) - { - check_deadlock_policy::check(); + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { - position pos; - bool bRet; + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } - { - rcu_lock l; + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } - if ( !find_position( val, pos, key_comparator(), false ) ) { - m_Stat.onUnlinkFailed(); - bRet = false; - } - else { - node_type * pDel = pos.pCur; - assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); - unsigned int nHeight = pDel->height(); + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) + goto retry; - if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) { - --m_ItemCounter; - m_Stat.onRemoveNode( nHeight ); - m_Stat.onUnlinkSuccess(); - bRet = true; + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. 
+ help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; } else { - m_Stat.onUnlinkFailed(); - bRet = false; + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp < 0 ) + pPred = pCur.ptr(); + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; } } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); } - return bRet; + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur.ptr(); + return pCur.ptr() && nCmp == 0; } - /// Extracts the item from the set with specified \p key - /** \anchor cds_intrusive_SkipListSet_rcu_extract - The function searches an item with key equal to \p key in the set, - unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. - If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. + bool find_min_position( position& pos ) + { + assert( gc::is_locked() ); - Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; - RCU \p synchronize method can be called. RCU should NOT be locked. - The function does not call the disposer for the item found. - The disposer will be implicitly invoked when the returned object is destroyed or when - its \p release() member function is called. - Example: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... + retry: + pPred = m_Head.head(); - typename skip_list::exempt_ptr ep( theList.extract( 5 )); - if ( ep ) { - // Deal with ep - //... + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { - // Dispose returned item. - ep.release(); - } - \endcode - */ - template - exempt_ptr extract( Q const& key ) - { - return exempt_ptr( do_extract( key )); - } + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + // pCur.bits() means that pPred is logically deleted + // head cannot be deleted + assert( pCur.bits() == 0 ); - /// Extracts the item from the set with comparing functor \p pred - /** - The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. - */ - template - exempt_ptr extract_with( Q const& key, Less pred ) - { - return exempt_ptr( do_extract_with( key, pred )); - } + if ( pCur.ptr() ) { - /// Extracts an item with minimal key from the list - /** - The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. - If the skip-list is empty the function returns an empty \p exempt_ptr. + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); - RCU \p synchronize method can be called. RCU should NOT be locked. - The function does not call the disposer for the item found. - The disposer will be implicitly invoked when the returned object is destroyed or when - its \p release() member function is manually called. - Example: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... 
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) + goto retry; - typename skip_list::exempt_ptr ep(theList.extract_min()); - if ( ep ) { - // Deal with ep - //... + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + } - // Dispose returned item. - ep.release(); + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); } - \endcode + return ( pos.pCur = pCur.ptr() ) != nullptr; + } - @note Due the concurrent nature of the list, the function extracts nearly minimum key. - It means that the function gets leftmost item and tries to unlink it. - During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. - So, the function returns the item with minimum key at the moment of list traversing. - */ - exempt_ptr extract_min() + bool find_max_position( position& pos ) { - return exempt_ptr( do_extract_min()); - } + assert( gc::is_locked() ); - /// Extracts an item with maximal key from the list - /** - The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. - If the skip-list is empty the function returns an empty \p exempt_ptr. + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; - RCU \p synchronize method can be called. RCU should NOT be locked. - The function does not call the disposer for the item found. - The disposer will be implicitly invoked when the returned object is destroyed or when - its \p release() member function is manually called. - Example: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... + retry: + pPred = m_Head.head(); - typename skip_list::exempt_ptr ep( theList.extract_max() ); - if ( ep ) { - // Deal with ep - //... - // Dispose returned item. - ep.release(); + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits() ) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() ) + goto retry; + + if ( pSucc.bits() ) { + // pCur is marked, i.e. logically deleted. + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + else { + if ( !pSucc.ptr() ) + break; + + pPred = pCur.ptr(); + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); } - \endcode - @note Due the concurrent nature of the list, the function extracts nearly maximal key. - It means that the function gets rightmost item and tries to unlink it. - During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. - So, the function returns the item with maximum key at the moment of list traversing. 
- */ - exempt_ptr extract_max() - { - return exempt_ptr( do_extract_max()); + return ( pos.pCur = pCur.ptr() ) != nullptr; } - /// Deletes the item from the set - /** \anchor cds_intrusive_SkipListSet_rcu_erase - The function searches an item with key equal to \p key in the set, - unlinks it from the set, and returns \p true. - If the item with key equal to \p key is not found the function return \p false. + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + assert( gc::is_locked() ); - Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + unsigned int const nHeight = pNode->height(); + pNode->clear_tower(); - RCU \p synchronize method can be called. RCU should not be locked. - */ - template - bool erase( const Q& key ) - { - return do_erase( key, key_comparator(), [](value_type const&) {} ); + // Insert at level 0 + { + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_relaxed ); + if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { + return false; + } + + f( val ); + } + + // Insert at level 1..max + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + marked_node_ptr p; + while ( true ) { + marked_node_ptr pSucc( pos.pSucc[nLevel] ); + + // Set pNode->next + // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now + if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + // pNode has been marked as removed while we are inserting it + // Stop inserting + assert( p.bits() != 0 ); + + if ( pNode->level_unlinked( nHeight - nLevel ) && p.bits() == 1 ) { + pos.dispose( pNode ); + m_Stat.onEraseWhileInsert(); + } + else + m_Stat.onLogicDeleteWhileInsert(); + + return true; + } + p = pSucc; + + // Link pNode into the list at nLevel + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // go to next level + break; + } + + // Renew insert position + m_Stat.onRenewInsertPosition(); + if ( !find_position( val, pos, key_comparator(), false ) ) { + // The node has been deleted while we are inserting it + m_Stat.onNotFoundWhileInsert(); + return true; + } + } + } + return true; } - /// Delete the item from the set with comparing functor \p pred - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" - but \p pred predicate is used for key comparing. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. - */ - template - bool erase_with( const Q& key, Less pred ) + template + bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) { - CDS_UNUSED( pred ); - return do_erase( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); - } + assert( pDel != nullptr ); + assert( gc::is_locked() ); - /// Deletes the item from the set - /** \anchor cds_intrusive_SkipListSet_rcu_erase_func - The function searches an item with key equal to \p key in the set, - call \p f functor with item found, unlinks it from the set, and returns \p true. 
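insert_at_position() succeeds or fails solely on the level-0 CAS; linking the node into the upper levels afterwards is best effort and is abandoned if the node gets marked for removal in the meantime. A sketch of the single-level link step, using a plain std::atomic<Node*> in place of the library's marked pointer:

#include <atomic>

struct Node {
    std::atomic<Node*> next;
    explicit Node( Node* n = nullptr ) : next( n ) {}
};

// Try to link 'node' between 'pred' and 'succ' at one level.  Returns false
// if another thread changed pred->next first; the real code then re-finds
// the insert position and retries.
inline bool link_at_level( Node* pred, Node* succ, Node* node )
{
    node->next.store( succ, std::memory_order_relaxed );
    Node* expected = succ;
    return pred->next.compare_exchange_strong( expected, node,
        std::memory_order_release, std::memory_order_relaxed );
}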
- The \ref disposer specified in \p Traits class template parameter is called - by garbage collector \p GC asynchronously. + marked_node_ptr pSucc; + back_off bkoff; - The \p Func interface is - \code - struct functor { - void operator()( value_type const& item ); - }; - \endcode - If the item with key equal to \p key is not found the function return \p false. + unsigned const nMask = bExtract ? 3u : 1u; - Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + // logical deletion (marking) + for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pSucc.bits() == 0 ) { + bkoff.reset(); + while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask, + memory_model::memory_order_release, atomics::memory_order_acquire )) + { + if ( pSucc.bits() == 0 ) { + bkoff(); + m_Stat.onMarkFailed(); + } + else if ( pSucc.bits() != nMask ) + return false; + } + } + } - RCU \p synchronize method can be called. RCU should not be locked. - */ - template - bool erase( Q const& key, Func f ) - { - return do_erase( key, key_comparator(), f ); + marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() ); + while ( true ) { + if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire )) + { + f( *node_traits::to_value_ptr( pDel ) ); + + // physical deletion + // try fast erase + p = pDel; + unsigned nCount = 0; + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) ) + { + // Do slow erase + if ( nCount ) { + if ( pDel->level_unlinked( nCount )) { + if ( p.bits() == 1 ) { + pos.dispose( pDel ); + m_Stat.onFastEraseHelped(); + } + else + m_Stat.onFastExtractHelped(); + return true; + } + } + + // Make slow erase + find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); + if ( bExtract ) + m_Stat.onSlowExtract(); + else + m_Stat.onSlowErase(); + + return true; + } + ++nCount; + } + + if ( !bExtract ) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + pos.dispose( pDel ); + m_Stat.onFastErase(); + } + else + m_Stat.onFastExtract(); + return true; + } + else if ( p.bits() ) { + // Another thread is deleting pDel right now + return false; + } + + m_Stat.onEraseRetry(); + bkoff(); + } } - /// Delete the item from the set with comparing functor \p pred - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" - but \p pred predicate is used for key comparing. - \p Less has the interface like \p std::less. - \p pred must imply the same element order as the comparator used for building the set. 
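try_remove_at() removes a node in two phases: the next pointer of every level is first marked (logical deletion, top level first, with level 0 last as the decisive step), and only then is each level physically unlinked from its predecessor. A rough sketch of the per-level pattern, with a hypothetical MarkedPtr that packs the deletion mark into the pointer's low bit:

#include <atomic>
#include <cstdint>

using MarkedPtr = std::uintptr_t;   // pointer value | mark bit

inline MarkedPtr make_marked( void* p, bool mark )
{
    return reinterpret_cast<std::uintptr_t>( p ) | ( mark ? 1u : 0u );
}

// Phase 1: logical deletion.  Returns true only for the thread that
// actually set the mark, mirroring the level-0 CAS in try_remove_at().
inline bool mark_for_removal( std::atomic<MarkedPtr>& next )
{
    MarkedPtr cur = next.load( std::memory_order_relaxed );
    while ( ( cur & 1u ) == 0 ) {
        if ( next.compare_exchange_weak( cur, cur | 1u,
                 std::memory_order_release, std::memory_order_acquire ))
            return true;
    }
    return false;   // somebody else marked it first
}

// Phase 2: physical unlink -- swing the predecessor's next pointer from the
// marked node to its successor.
inline bool unlink( std::atomic<MarkedPtr>& predNext, void* node, void* succ )
{
    MarkedPtr expected = make_marked( node, false );
    return predNext.compare_exchange_strong( expected, make_marked( succ, false ),
        std::memory_order_acq_rel, std::memory_order_acquire );
}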
- */ - template - bool erase_with( Q const& key, Less pred, Func f ) + enum finsd_fastpath_result { + find_fastpath_found, + find_fastpath_not_found, + find_fastpath_abort + }; + template + finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const { - CDS_UNUSED( pred ); - return do_erase( key, cds::opt::details::make_comparator_from_less(), f ); - } + node_type * pPred; + marked_node_ptr pCur; + marked_node_ptr pSucc; + marked_node_ptr pNull; - /// Finds \p key - /** @anchor cds_intrusive_SkipListSet_rcu_find_func - The function searches the item with key equal to \p key and calls the functor \p f for item found. - The interface of \p Func functor is: - \code - struct functor { - void operator()( value_type& item, Q& key ); - }; - \endcode - where \p item is the item found, \p key is the find function argument. + back_off bkoff; + unsigned attempt = 0; - The functor can change non-key fields of \p item. Note that the functor is only guarantee - that \p item cannot be disposed during functor is executing. - The functor does not serialize simultaneous access to the set \p item. If such access is - possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + try_again: + pPred = m_Head.head(); + for ( int nLevel = static_cast( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur == pNull ) + continue; - The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor - can modify both arguments. + while ( pCur != pNull ) { + if ( pCur.bits() ) { + // pPrev is being removed + if ( ++attempt < 4 ) { + bkoff(); + goto try_again; + } - The function applies RCU lock internally. + return find_fastpath_abort; + } - The function returns \p true if \p key is found, \p false otherwise. - */ - template - bool find( Q& key, Func f ) - { - return do_find_with( key, key_comparator(), f ); + if ( pCur.ptr() ) { + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + } + else if ( nCmp == 0 ) { + // found + f( *node_traits::to_value_ptr( pCur.ptr() ), val ); + return find_fastpath_found; + } + else // pCur > val - go down + break; + } + } + } + + return find_fastpath_not_found; } - //@cond - template - bool find( Q const& key, Func f ) + + template + bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) { - return do_find_with( key, key_comparator(), f ); + if ( find_position( val, pos, cmp, true ) ) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + f( *node_traits::to_value_ptr( pos.pCur ), val ); + return true; + } + else + return false; } - //@endcond - /// Finds the key \p key with comparing functor \p pred - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" - but \p cmp is used for key comparison. - \p Less functor has the interface like \p std::less. - \p cmp must imply the same element order as the comparator used for building the set. 
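do_find_with() first tries find_fastpath(), which walks the levels without helping removal and gives up after a few encounters with marked nodes, and only then runs find_slowpath() over find_position(). From the caller's side this is contains() and find(); skip_list and foo are the same placeholder types as in the earlier sketches:

// Both calls take the RCU read lock internally.
void lookup( skip_list& theList )
{
    // Presence check only.
    bool present = theList.contains( 5 );

    // Run a functor against the found item; the item cannot be disposed
    // while the functor runs, but access to it is not otherwise serialized.
    int key = 10;
    bool found = theList.find( key, []( foo& item, int const& ) {
        // read or update non-key fields of 'item' here
    });

    (void) present; (void) found;
}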
- */ - template - bool find_with( Q& key, Less pred, Func f ) + template + bool do_find_with( Q& val, Compare cmp, Func f ) { - CDS_UNUSED( pred ); - return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + position pos; + return do_find_with( val, cmp, f, pos ); } - //@cond - template - bool find_with( Q const& key, Less pred, Func f ) + + template + bool do_find_with( Q& val, Compare cmp, Func f, position& pos ) { - CDS_UNUSED( pred ); - return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + bool bRet; + + { + rcu_lock l; + + switch ( find_fastpath( val, cmp, f ) ) { + case find_fastpath_found: + m_Stat.onFindFastSuccess(); + return true; + case find_fastpath_not_found: + m_Stat.onFindFastFailed(); + return false; + default: + break; + } + + if ( find_slowpath( val, cmp, f, pos ) ) { + m_Stat.onFindSlowSuccess(); + bRet = true; + } + else { + m_Stat.onFindSlowFailed(); + bRet = false; + } + } + return bRet; } - //@endcond - - /// Checks whether the set contains \p key - /** - The function searches the item with key equal to \p key - and returns \p true if it is found, and \p false otherwise. - The function applies RCU lock internally. - */ - template - bool contains( Q const& key ) - { - return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} ); - } - //@cond - template - CDS_DEPRECATED("deprecated, use contains()") - bool find( Q const& key ) + template + bool do_erase( Q const& val, Compare cmp, Func f ) { - return contains( key ); - } - //@endcond + check_deadlock_policy::check(); - /// Checks whether the set contains \p key using \p pred predicate for searching - /** - The function is similar to contains( key ) but \p pred is used for key comparing. - \p Less functor has the interface like \p std::less. - \p Less must imply the same element order as the comparator used for building the set. - */ - template - bool contains( Q const& key, Less pred ) - { - CDS_UNUSED( pred ); - return do_find_with( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + position pos; + bool bRet; + + { + rcu_lock rcuLock; + + if ( !find_position( val, pos, cmp, false ) ) { + m_Stat.onEraseFailed(); + bRet = false; + } + else { + node_type * pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, f, false ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onEraseSuccess(); + bRet = true; + } + else { + m_Stat.onEraseFailed(); + bRet = false; + } + } + } + + return bRet; } - //@cond - template - CDS_DEPRECATED("deprecated, use contains()") - bool find_with( Q const& key, Less pred ) + + template + value_type * do_extract_key( Q const& key, Compare cmp, position& pos ) { - return contains( key, pred ); - } - //@endcond + // RCU should be locked!!! + assert( gc::is_locked() ); - /// Finds \p key and return the item found - /** \anchor cds_intrusive_SkipListSet_rcu_get - The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. - If \p key is not found it returns empty \p raw_ptr. + node_type * pDel; - Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
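do_erase() is the common path behind erase() and erase_with(); the functor is applied to the item being removed before the disposer runs, and, per the documentation above, RCU must not be locked by the caller because synchronize() may be called. A usage sketch under the same assumptions as the earlier examples:

void remove_keys( skip_list& theList )
{
    // Erase by key; the disposer is invoked later by the RCU garbage collector.
    bool removed = theList.erase( 5 );

    // Erase and observe the unlinked item before it is disposed.
    removed = theList.erase( 10, []( foo const& item ) {
        // e.g. collect statistics about 'item'
    }) || removed;

    (void) removed;
}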
+ if ( !find_position( key, pos, cmp, false ) ) { + m_Stat.onExtractFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); - RCU should be locked before call of this function. - Returned item is valid only while RCU is locked: - \code - typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; - skip_list theList; - // ... - typename skip_list::raw_ptr pVal; - { - // Lock RCU - skip_list::rcu_lock lock; + unsigned int const nHeight = pDel->height(); - pVal = theList.get( 5 ); - if ( pVal ) { - // Deal with pVal - //... + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractSuccess(); + } + else { + m_Stat.onExtractFailed(); + pDel = nullptr; } } - // You can manually release pVal after RCU-locked section - pVal.release(); - \endcode - */ + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; + } + template - raw_ptr get( Q const& key ) + value_type * do_extract( Q const& key ) { - assert( gc::is_locked()); - + check_deadlock_policy::check(); + value_type * pDel = nullptr; position pos; - value_type * pFound; - if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) - return raw_ptr( pFound, raw_ptr_disposer( pos )); - return raw_ptr( raw_ptr_disposer( pos )); - } + { + rcu_lock l; + pDel = do_extract_key( key, key_comparator(), pos ); + } - /// Finds \p key and return the item found - /** - The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" - but \p pred is used for comparing the keys. + return pDel; + } - \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q - in any order. - \p pred must imply the same element order as the comparator used for building the set. - */ template - raw_ptr get_with( Q const& key, Less pred ) + value_type * do_extract_with( Q const& key, Less pred ) { CDS_UNUSED( pred ); - assert( gc::is_locked()); - - value_type * pFound = nullptr; + check_deadlock_policy::check(); + value_type * pDel = nullptr; position pos; - if ( do_find_with( key, cds::opt::details::make_comparator_from_less(), - [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) { - return raw_ptr( pFound, raw_ptr_disposer( pos )); + rcu_lock l; + pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less(), pos ); } - return raw_ptr( raw_ptr_disposer( pos )); - } - /// Returns item count in the set - /** - The value returned depends on item counter type provided by \p Traits template parameter. - For \p atomicity::empty_item_counter the function always returns 0. - Therefore, the function is not suitable for checking the set emptiness, use \p empty() - member function for this purpose. - */ - size_t size() const - { - return m_ItemCounter; + return pDel; } - /// Checks if the set is empty - bool empty() const + value_type * do_extract_min() { - return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; - } + assert( !gc::is_locked() ); - /// Clears the set (not atomic) - /** - The function unlink all items from the set. - The function is not atomic, thus, in multi-threaded environment with parallel insertions - this sequence - \code - set.clear(); - assert( set.empty() ); - \endcode - the assertion could be raised. 
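do_extract_with() is the predicate flavour of extraction. A sketch with a hypothetical foo_less ordering; it must induce the same order as the set's comparator, and foo is assumed, for illustration only, to expose an integer field nKey:

// foo_less compares a foo against a plain integer key in both argument
// orders, as required by the Less-to-comparator adapter.
struct foo_less {
    bool operator()( foo const& lhs, int rhs ) const { return lhs.nKey < rhs; }
    bool operator()( int lhs, foo const& rhs ) const { return lhs < rhs.nKey; }
};

void take_one( skip_list& theList )
{
    // RCU must not be locked by the caller: synchronize() may be called.
    skip_list::exempt_ptr ep = theList.extract_with( 5, foo_less());
    if ( ep ) {
        // use *ep ...
        ep.release();   // dispose the extracted item
    }
}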
+ position pos; + node_type * pDel; - For each item the \p disposer will be called automatically after unlinking. - */ - void clear() - { - exempt_ptr ep; - while ( (ep = extract_min()) ); + { + rcu_lock l; + + if ( !find_min_position( pos ) ) { + m_Stat.onExtractMinFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMinSuccess(); + } + else { + m_Stat.onExtractMinFailed(); + pDel = nullptr; + } + } + } + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; } - /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. - static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT + value_type * do_extract_max() { - return c_nMaxHeight; + assert( !gc::is_locked() ); + + position pos; + node_type * pDel; + + { + rcu_lock l; + + if ( !find_max_position( pos ) ) { + m_Stat.onExtractMaxFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMaxSuccess(); + } + else { + m_Stat.onExtractMaxFailed(); + pDel = nullptr; + } + } + } + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; } - /// Returns const reference to internal statistics - stat const& statistics() const + void increase_height( unsigned int nHeight ) { - return m_Stat; + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + if ( nCur < nHeight ) + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); } + //@endcond }; }} // namespace cds::intrusive
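A closing note on increase_height(): the stored height is only the level at which the fast-path search starts, so the CAS is deliberately not retried on failure; a stale value only means a search may begin a level lower than it could. A self-contained sketch of the same one-shot idiom:

#include <atomic>

// Raise a monotonically growing hint with a single CAS; losers do not retry.
inline void raise_height_hint( std::atomic<unsigned>& height, unsigned nHeight )
{
    unsigned cur = height.load( std::memory_order_relaxed );
    if ( cur < nHeight ) {
        height.compare_exchange_strong( cur, nHeight,
            std::memory_order_release, std::memory_order_relaxed );
    }
}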