Docfix
[libcds.git] / cds / intrusive / impl / iterable_list.h
index 948ebc7379367e142aa39b4f8b01ec5fddc9f46a..33a99054c6b11d06ccc42619fd498e60fdd5fe97 100644 (file)
@@ -1,7 +1,7 @@
 /*
     This file is a part of libcds - Concurrent Data Structures library
 
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
     Source code repo: http://github.com/khizmax/libcds/
     Download: http://sourceforge.net/projects/libcds/files/
@@ -108,7 +108,6 @@ namespace cds { namespace intrusive {
         You should select GC you want and include appropriate .h-file:
         - for \p gc::HP: <tt> <cds/intrusive/iterable_list_hp.h> </tt>
         - for \p gc::DHP: <tt> <cds/intrusive/iterable_list_dhp.h> </tt>
-        - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_IterableList_rcu "RCU-based IterableList"
     */
     template <
         class GC
@@ -147,7 +146,7 @@ namespace cds { namespace intrusive {
 
         typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
 
-        static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 3; ///< Count of hazard pointer required for the algorithm
+        static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm
 
         //@cond
         // Rebind traits (split-list support)
@@ -333,7 +332,7 @@ namespace cds { namespace intrusive {
             this code
             \code
                 if ( it1 == it2 )
-                    assert( &(*it1) == &(*it2) );
+                    assert( &(*it1) == &(*it2));
             \endcode
             can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread.
             The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing.
@@ -847,12 +846,12 @@ namespace cds { namespace intrusive {
             insert_position pos;
 
             while ( true ) {
-                if ( inserting_search( pHead, *pNode->data.load(memory_model::memory_order_relaxed).ptr(), pos, key_comparator() ) ) {
+                if ( inserting_search( pHead, *pNode->data.load(memory_model::memory_order_relaxed).ptr(), pos, key_comparator())) {
                     m_Stat.onInsertFailed();
                     return false;
                 }
 
-                if ( link_aux_node( pNode, pos ) ) {
+                if ( link_aux_node( pNode, pos, pHead )) {
                     ++m_ItemCounter;
                     m_Stat.onInsertSuccess();
                     return true;
@@ -867,12 +866,12 @@ namespace cds { namespace intrusive {
             insert_position pos;
 
             while ( true ) {
-                if ( inserting_search( pHead, val, pos, key_comparator() )) {
+                if ( inserting_search( pHead, val, pos, key_comparator())) {
                     m_Stat.onInsertFailed();
                     return false;
                 }
 
-                if ( link_data( &val, pos ) ) {
+                if ( link_data( &val, pos, pHead )) {
                     ++m_ItemCounter;
                     m_Stat.onInsertSuccess();
                     return true;
@@ -891,12 +890,12 @@ namespace cds { namespace intrusive {
             guard.assign( &val );
 
             while ( true ) {
-                if ( inserting_search( pHead, val, pos, key_comparator() ) ) {
+                if ( inserting_search( pHead, val, pos, key_comparator())) {
                     m_Stat.onInsertFailed();
                     return false;
                 }
 
-                if ( link_data( &val, pos ) ) {
+                if ( link_data( &val, pos, pHead )) {
                     f( val );
                     ++m_ItemCounter;
                     m_Stat.onInsertSuccess();
@@ -916,14 +915,14 @@ namespace cds { namespace intrusive {
             guard.assign( &val );
 
             while ( true ) {
-                if ( inserting_search( pHead, val, pos, key_comparator() ) ) {
+                if ( inserting_search( pHead, val, pos, key_comparator())) {
                     // try to replace pCur->data with val
                     assert( pos.pFound != nullptr );
                     assert( key_comparator()(*pos.pFound, val) == 0 );
 
                     marked_data_ptr pFound( pos.pFound );
                     if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ),
-                            memory_model::memory_order_release, atomics::memory_order_relaxed ))) 
+                            memory_model::memory_order_release, atomics::memory_order_relaxed )))
                     {
                         if ( pos.pFound != &val ) {
                             retire_data( pos.pFound );
@@ -939,7 +938,7 @@ namespace cds { namespace intrusive {
                         return std::make_pair( false, false );
                     }
 
-                    if ( link_data( &val, pos )) {
+                    if ( link_data( &val, pos, pHead )) {
                         func( val, static_cast<value_type*>( nullptr ));
                         ++m_ItemCounter;
                         m_Stat.onUpdateNew();
@@ -1042,7 +1041,7 @@ namespace cds { namespace intrusive {
         bool find_at( node_type const* pHead, Q const& val, Compare cmp ) const
         {
             position pos;
-            if ( search( pHead, val, pos, cmp ) ) {
+            if ( search( pHead, val, pos, cmp )) {
                 m_Stat.onFindSuccess();
                 return true;
             }
@@ -1191,7 +1190,7 @@ namespace cds { namespace intrusive {
         void destroy( Predicate pred )
         {
             node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed );
-            while ( pNode != pNode->next.load( memory_model::memory_order_relaxed ) ) {
+            while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) {
                 value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr();
                 node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
                 bool const is_regular_node = !pVal || pred( pVal );
@@ -1247,7 +1246,7 @@ namespace cds { namespace intrusive {
             }
         }
 
-        bool link_data( value_type * pVal, insert_position& pos )
+        bool link_data( value_type* pVal, insert_position& pos, node_type* pHead )
         {
             assert( pos.pPrev != nullptr );
             assert( pos.pCur != nullptr );
@@ -1257,15 +1256,16 @@ namespace cds { namespace intrusive {
             // and then set it to another.
             // To prevent this we mark pos.pCur data as undeletable by setting LSB
             marked_data_ptr valCur( pos.pFound );
-            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
                 m_Stat.onNodeMarkFailed();
                 return false;
             }
 
             marked_data_ptr valPrev( pos.pPrevVal );
-            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
                 m_Stat.onNodeMarkFailed();
                 return false;
             }
@@ -1275,12 +1275,26 @@ namespace cds { namespace intrusive {
                 // sequence pPrev - pCur is broken
                 pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
                 pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
                 m_Stat.onNodeSeqBreak();
                 return false;
             }
 
-            if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr )
-            {
+            if ( pos.pPrevVal == nullptr ) {
+                // Check ABA-problem for prev
+                // There is a possibility that the current thread was preempted
+                // on entry to this function. Other threads can link data to prev
+                // and then remove it. As a result, the order of items may be changed
+                if ( find_prev( pHead, *pVal ) != pos.pPrev ) {
+                    pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+                    pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+                    m_Stat.onNullPrevABA();
+                    return false;
+                }
+            }
+
+            if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr ) {
                 // reuse pPrev
 
                 // Set pos.pPrev data if it is null
@@ -1319,7 +1333,7 @@ namespace cds { namespace intrusive {
         }
 
         // split-list support
-        bool link_aux_node( node_type * pNode, insert_position& pos )
+        bool link_aux_node( node_type * pNode, insert_position& pos, node_type* pHead )
         {
             assert( pos.pPrev != nullptr );
             assert( pos.pCur != nullptr );
@@ -1329,14 +1343,14 @@ namespace cds { namespace intrusive {
             // and then set it to another.
             // To prevent this we mark pos.pCur data as undeletable by setting LSB
             marked_data_ptr valCur( pos.pFound );
-            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
                 m_Stat.onNodeMarkFailed();
                 return false;
             }
 
             marked_data_ptr valPrev( pos.pPrevVal );
-            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
                 m_Stat.onNodeMarkFailed();
                 return false;
@@ -1351,6 +1365,20 @@ namespace cds { namespace intrusive {
                 return false;
             }
 
+            if ( pos.pPrevVal == nullptr ) {
+                // Check ABA-problem for prev
+                // There is a possibility that the current thread was preempted
+                // on entry to this function. Other threads can insert (link) an item to prev
+                // and then remove it. As a result, the order of items may be changed
+                if ( find_prev( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr()) != pos.pPrev ) {
+                    pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+                    pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+                    m_Stat.onNullPrevABA();
+                    return false;
+                }
+            }
+
             // insert new node between pos.pPrev and pos.pCur
             pNode->next.store( pos.pCur, memory_model::memory_order_relaxed );
 
@@ -1369,12 +1397,40 @@ namespace cds { namespace intrusive {
             assert( pos.pFound != nullptr );
 
             marked_data_ptr val( pos.pFound );
-            if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 retire_data( pos.pFound );
                 return true;
             }
             return false;
         }
+
+        template <typename Q>
+        node_type* find_prev( node_type const* pHead, Q const& val ) const
+        {
+            node_type*  pPrev = const_cast<node_type*>(pHead);
+            typename gc::Guard guard;
+            key_comparator cmp;
+
+            while ( true ) {
+                node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed );
+
+                if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) {
+                    // end-of-list
+                    return pPrev;
+                }
+
+                value_type * pVal = guard.protect( pCur->data,
+                    []( marked_data_ptr p ) -> value_type*
+                {
+                    return p.ptr();
+                } ).ptr();
+
+                if ( pVal && cmp( *pVal, val ) >= 0 )
+                    return pPrev;
+
+                pPrev = pCur;
+            }
+        }
         //@endcond
     };
 }} // namespace cds::intrusive