SkipList: fixed memory leaks
author khizmax <libcds.dev@gmail.com>
Fri, 30 Dec 2016 21:10:38 +0000 (00:10 +0300)
committer khizmax <libcds.dev@gmail.com>
Fri, 30 Dec 2016 21:10:38 +0000 (00:10 +0300)
cds/intrusive/details/skip_list_base.h
cds/intrusive/impl/skip_list.h
cds/intrusive/skip_list_rcu.h
test/include/cds_test/stat_skiplist_out.h

index 97d30e3..d304e5e 100644 (file)
@@ -70,7 +70,7 @@ namespace cds { namespace intrusive {
             atomic_marked_ptr           m_pNext;     ///< Next item in bottom-list (list at level 0)
             unsigned int                m_nHeight;   ///< Node height (size of \p m_arrNext array). For node at level 0 the height is 1.
             atomic_marked_ptr *         m_arrNext;   ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
-            atomics::atomic<unsigned int> m_nUnlink; ///< How many levels has been unlinked
+            atomics::atomic<unsigned int> m_nUnlink; ///< Unlink helper
             //@endcond
 
         public:
@@ -78,7 +78,7 @@ namespace cds { namespace intrusive {
                 : m_pNext( nullptr )
                 , m_nHeight( 1 )
                 , m_arrNext( nullptr )
-                , m_nUnlink( 0 )
+                , m_nUnlink( 1 )
             {}
 
 
@@ -92,7 +92,7 @@ namespace cds { namespace intrusive {
 
                 m_arrNext = nextTower;
                 m_nHeight = nHeight;
-                atomics::atomic_thread_fence( atomics::memory_order_release );
+                m_nUnlink.store( nHeight, atomics::memory_order_relaxed );
             }
 
             //@cond
@@ -168,7 +168,12 @@ namespace cds { namespace intrusive {
 
             bool level_unlinked( unsigned nCount = 1 )
             {
-                return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height();
+                return m_nUnlink.fetch_sub( nCount, atomics::memory_order_relaxed ) == 1;
+            }
+
+            bool is_upper_level( unsigned nLevel ) const
+            {
+                return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1;
             }
             //@endcond
         };
@@ -403,12 +408,9 @@ namespace cds { namespace intrusive {
             event_counter   m_nFindSlowFailed       ; ///< Count of failed call of \p find and all derivatives (via slow-path)
             event_counter   m_nRenewInsertPosition  ; ///< Count of renewing position events while inserting
             event_counter   m_nLogicDeleteWhileInsert; ///< Count of events "The node has been logically deleted while inserting"
-            event_counter   m_nEraseWhileInsert     ; ///< Count of events "The node has been disposed while inserting"
             event_counter   m_nNotFoundWhileInsert  ; ///< Count of events "Inserting node is not found"
             event_counter   m_nFastErase            ; ///< Fast erase event counter
-            event_counter   m_nFastEraseHelped      ; ///< Fast erase with helping of other thread
             event_counter   m_nFastExtract          ; ///< Fast extract event counter
-            event_counter   m_nFastExtractHelped    ; ///< Fast extract with helping of other thread
             event_counter   m_nSlowErase            ; ///< Slow erase event counter
             event_counter   m_nSlowExtract          ; ///< Slow extract event counter
             event_counter   m_nExtractSuccess       ; ///< Count of successful call of \p extract
@@ -455,12 +457,9 @@ namespace cds { namespace intrusive {
             void onExtractWhileFind()       { ++m_nExtractWhileFind ; }
             void onRenewInsertPosition()    { ++m_nRenewInsertPosition; }
             void onLogicDeleteWhileInsert() { ++m_nLogicDeleteWhileInsert; }
-            void onEraseWhileInsert()       { ++m_nEraseWhileInsert;  }
             void onNotFoundWhileInsert()    { ++m_nNotFoundWhileInsert; }
             void onFastErase()              { ++m_nFastErase;         }
-            void onFastEraseHelped()        { ++m_nFastEraseHelped;   }
             void onFastExtract()            { ++m_nFastExtract;       }
-            void onFastExtractHelped()      { ++m_nFastExtractHelped; }
             void onSlowErase()              { ++m_nSlowErase;         }
             void onSlowExtract()            { ++m_nSlowExtract;       }
             void onExtractSuccess()         { ++m_nExtractSuccess;    }
@@ -500,12 +499,9 @@ namespace cds { namespace intrusive {
             void onExtractWhileFind()       const {}
             void onRenewInsertPosition()    const {}
             void onLogicDeleteWhileInsert() const {}
-            void onEraseWhileInsert()       const {}
             void onNotFoundWhileInsert()    const {}
             void onFastErase()              const {}
-            void onFastEraseHelped()        const {}
             void onFastExtract()            const {}
-            void onFastExtractHelped()      const {}
             void onSlowErase()              const {}
             void onSlowExtract()            const {}
             void onExtractSuccess()         const {}
index c3b38f0..df92274 100644 (file)
@@ -394,7 +394,8 @@ namespace cds { namespace intrusive {
         // c_nMaxHeight * 2 - pPred/pSucc guards
         // + 1 - for erase, unlink
         // + 1 - for clear
-        static size_t const c_nHazardPtrCount = c_nMaxHeight * 2 + 2; ///< Count of hazard pointer required for the skip-list
+        // + 1 - for help_remove()
+        static size_t const c_nHazardPtrCount = c_nMaxHeight * 2 + 3; ///< Count of hazard pointer required for the skip-list
 
     protected:
         typedef typename node_type::atomic_marked_ptr   atomic_node_ptr;   ///< Atomic marked node pointer
@@ -1150,12 +1151,17 @@ namespace cds { namespace intrusive {
         void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc )
         {
             marked_node_ptr p( pCur.ptr() );
-            if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
-                memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
-            {
-                if ( pCur->level_unlinked() ) {
-                    gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
-                    m_Stat.onEraseWhileFind();
+
+            if ( pCur->is_upper_level( nLevel )) {
+                typename gc::Guard hp;
+                if ( hp.protect( pCur->next( nLevel ), gc_protect ) == pSucc &&
+                     pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
+                        memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
+                {
+                    if ( pCur->level_unlinked() ) {
+                        gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
+                        m_Stat.onEraseWhileFind();
+                    }
                 }
             }
         }
@@ -1355,18 +1361,14 @@ namespace cds { namespace intrusive {
                     // Set pNode->next
                     // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now
                     if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
-                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire )
+                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
                     {
                         // pNode has been marked as removed while we are inserting it
                         // Stop inserting
                         assert( p.bits() != 0 );
-
-                        if ( pNode->level_unlinked( nHeight - nLevel )) {
-                            gc::retire( node_traits::to_value_ptr( pNode ), dispose_node );
-                            m_Stat.onEraseWhileInsert();
-                        }
-                        else
-                            m_Stat.onLogicDeleteWhileInsert();
+                        if ( pNode->level_unlinked( nHeight - nLevel ))
+                            gc::retire( &val, dispose_node );
+                        m_Stat.onLogicDeleteWhileInsert();
                         return true;
                     }
                     p = pSucc;
@@ -1423,29 +1425,21 @@ namespace cds { namespace intrusive {
                     // Physical deletion
                     // try fast erase
                     p = pDel;
-                    unsigned nCount = 0;
 
                     for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
 
                         pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
-                        if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
-                            memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
+                        if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
+                            memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
                         {
-                            // Maybe, another threads already helped us to delete the node?..
-                            if ( nCount ) {
-                                if ( pDel->level_unlinked( nCount )) {
-                                    gc::retire( node_traits::to_value_ptr( pDel ), dispose_node );
-                                    m_Stat.onFastEraseHelped();
-                                    return true;
-                                }
-                            }
-
+                            pDel->level_unlinked();
+                        }
+                        else {
                             // Make slow erase
                             find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
                             m_Stat.onSlowErase();
                             return true;
                         }
-                        ++nCount;
                     }
 
                     // Fast erasing success
@@ -1486,8 +1480,6 @@ namespace cds { namespace intrusive {
             pPred = m_Head.head();
             for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
                 pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect );
-                if ( pCur == pNull )
-                    continue;
 
                 while ( pCur != pNull ) {
                     if ( pCur.bits() ) {
index 24a12ce..0339461 100644 (file)
@@ -67,7 +67,7 @@ namespace cds { namespace intrusive {
         protected:
             unsigned int            m_nHeight;   ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
             atomic_marked_ptr *     m_arrNext;   ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
-            atomics::atomic<unsigned> m_nUnlink; ///< How many levels has been unlinked
+            atomics::atomic<unsigned> m_nUnlink; ///< Unlink helper
 
         public:
             /// Constructs a node of height 1 (a bottom-list node)
@@ -76,7 +76,7 @@ namespace cds { namespace intrusive {
                 , m_pDelChain( nullptr )
                 , m_nHeight(1)
                 , m_arrNext( nullptr )
-                , m_nUnlink(0)
+                , m_nUnlink(1)
             {}
 
             /// Constructs a node of height \p nHeight
@@ -89,6 +89,7 @@ namespace cds { namespace intrusive {
 
                 m_arrNext = nextTower;
                 m_nHeight = nHeight;
+                m_nUnlink.store( nHeight, atomics::memory_order_release );
             }
 
             atomic_marked_ptr * release_tower()
@@ -179,7 +180,12 @@ namespace cds { namespace intrusive {
 
             bool level_unlinked( unsigned nCount = 1 )
             {
-                return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height();
+                return m_nUnlink.fetch_sub( nCount, std::memory_order_relaxed ) == 1;
+            }
+
+            bool is_upper_level( unsigned nLevel ) const
+            {
+                return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1;
             }
         };
     } // namespace skip_list
@@ -1403,8 +1409,10 @@ namespace cds { namespace intrusive {
         void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
         {
             marked_node_ptr p( pCur.ptr() );
-            if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                memory_model::memory_order_release, atomics::memory_order_relaxed ) )
+
+            if ( pCur->is_upper_level( nLevel )
+                && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+                    memory_model::memory_order_release, atomics::memory_order_relaxed ))
             {
                 if ( pCur->level_unlinked()) {
                     if ( !is_extracted( pSucc ) ) {
@@ -1585,9 +1593,8 @@ namespace cds { namespace intrusive {
             {
                 marked_node_ptr p( pos.pSucc[0] );
                 pNode->next( 0 ).store( p, memory_model::memory_order_relaxed );
-                if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
+                if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                     return false;
-                }
 
                 f( val );
             }
@@ -1601,19 +1608,14 @@ namespace cds { namespace intrusive {
                     // Set pNode->next
                     // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now
                     if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
-                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
+                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
                     {
                         // pNode has been marked as removed while we are inserting it
                         // Stop inserting
                         assert( p.bits() != 0 );
-
-                        if ( pNode->level_unlinked( nHeight - nLevel ) && p.bits() == 1 ) {
+                        if ( pNode->level_unlinked( nHeight - nLevel ))
                             pos.dispose( pNode );
-                            m_Stat.onEraseWhileInsert();
-                        }
-                        else
-                            m_Stat.onLogicDeleteWhileInsert();
-
+                        m_Stat.onLogicDeleteWhileInsert();
                         return true;
                     }
                     p = pSucc;
@@ -1676,26 +1678,15 @@ namespace cds { namespace intrusive {
                     // physical deletion
                     // try fast erase
                     p = pDel;
-                    unsigned nCount = 0;
                     for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
 
                         pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
-                        if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+                        if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
                             memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
                         {
-                            // Do slow erase
-                            if ( nCount ) {
-                                if ( pDel->level_unlinked( nCount )) {
-                                    if ( p.bits() == 1 ) {
-                                        pos.dispose( pDel );
-                                        m_Stat.onFastEraseHelped();
-                                    }
-                                    else
-                                        m_Stat.onFastExtractHelped();
-                                    return true;
-                                }
-                            }
-
+                            pDel->level_unlinked();
+                        }
+                        else {
                             // Make slow erase
                             find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
                             if ( bExtract )
@@ -1705,9 +1696,9 @@ namespace cds { namespace intrusive {
 
                             return true;
                         }
-                        ++nCount;
                     }
 
+                    // Fast erasing success
                     if ( !bExtract ) {
                         // We cannot free the node at this moment since RCU is locked
                         // Link deleted nodes to a chain to free later
@@ -1749,12 +1740,10 @@ namespace cds { namespace intrusive {
             pPred = m_Head.head();
             for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
                 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
-                if ( pCur == pNull )
-                    continue;
 
                 while ( pCur != pNull ) {
                     if ( pCur.bits() ) {
-                        // pPrev is being removed
+                        // pPred is being removed
                         if ( ++attempt < 4 ) {
                             bkoff();
                             goto try_again;
index d263130..6cfb1d6 100644 (file)
@@ -75,13 +75,10 @@ namespace cds_test {
             << CDSSTRESS_STAT_OUT( s, m_nFindSlowFailed )
             << CDSSTRESS_STAT_OUT( s, m_nRenewInsertPosition )
             << CDSSTRESS_STAT_OUT( s, m_nLogicDeleteWhileInsert )
-            << CDSSTRESS_STAT_OUT( s, m_nEraseWhileInsert )
             << CDSSTRESS_STAT_OUT( s, m_nNotFoundWhileInsert )
             << CDSSTRESS_STAT_OUT( s, m_nFastErase )
-            << CDSSTRESS_STAT_OUT( s, m_nFastEraseHelped )
             << CDSSTRESS_STAT_OUT( s, m_nSlowErase )
             << CDSSTRESS_STAT_OUT( s, m_nFastExtract )
-            << CDSSTRESS_STAT_OUT( s, m_nFastExtractHelped )
             << CDSSTRESS_STAT_OUT( s, m_nSlowExtract )
             << CDSSTRESS_STAT_OUT( s, m_nEraseWhileFind )
             << CDSSTRESS_STAT_OUT( s, m_nExtractWhileFind )