Docfix
[libcds.git] / cds / intrusive / impl / iterable_list.h
index bf01a32776135640981fd24742ad87dbf95a8416..33a99054c6b11d06ccc42619fd498e60fdd5fe97 100644 (file)
@@ -1,7 +1,7 @@
 /*
     This file is a part of libcds - Concurrent Data Structures library
 
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
     Source code repo: http://github.com/khizmax/libcds/
     Download: http://sourceforge.net/projects/libcds/files/
@@ -40,7 +40,10 @@ namespace cds { namespace intrusive {
     /** @ingroup cds_intrusive_list
         \anchor cds_intrusive_IterableList_hp
 
-        This lock-free list implementation supports thread-safe iterators.
+        This non-blocking list implementation supports thread-safe iterators:
+        searching and removing are lock-free, while inserting is non-blocking
+        because it uses light-weight synchronization based on marked pointers.
+
         Unlike \p cds::intrusive::MichaelList the iterable list does not require
         any hook in \p T to be stored in the list.
 
@@ -105,7 +108,6 @@ namespace cds { namespace intrusive {
         You should select GC you want and include appropriate .h-file:
         - for \p gc::HP: <tt> <cds/intrusive/iterable_list_hp.h> </tt>
         - for \p gc::DHP: <tt> <cds/intrusive/iterable_list_dhp.h> </tt>
-        - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_IterableList_rcu "RCU-based IterableList"
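+
+        Usage example (an illustrative sketch, not part of the original
+        documentation; \p my_item, \p my_cmp and \p my_list are placeholder
+        names, and the snippet assumes \p cds::Initialize() has been called,
+        the HP GC is constructed and the current thread is attached to libcds):
+        \code
+        #include <cds/gc/hp.h>
+        #include <cds/intrusive/iterable_list_hp.h>
+
+        struct my_item {
+            int key;
+            // payload...
+        };
+
+        // three-way comparator over the key
+        struct my_cmp {
+            int operator()( my_item const& lhs, my_item const& rhs ) const
+            {
+                return lhs.key < rhs.key ? -1 : ( rhs.key < lhs.key ? 1 : 0 );
+            }
+        };
+
+        typedef cds::intrusive::IterableList< cds::gc::HP, my_item,
+            cds::intrusive::iterable_list::make_traits<
+                cds::opt::compare< my_cmp >
+            >::type
+        > my_list;
+
+        my_list list;
+        my_item item{ 42 };
+        list.insert( item );   // the list keeps a pointer to \p item, no copy is made
+        \endcode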
     */
     template <
         class GC
@@ -117,6 +119,9 @@ namespace cds { namespace intrusive {
 #endif
     >
     class IterableList
+#ifndef CDS_DOXYGEN_INVOKED
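+        // empty tag base class: split-list support detects an iterable
+        // ordered list by this base (hidden from the Doxygen-generated docs)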
+        : public iterable_list_tag
+#endif
     {
     public:
         typedef T       value_type; ///< type of value stored in the list
@@ -141,7 +146,7 @@ namespace cds { namespace intrusive {
 
         typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
 
-        static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 3; ///< Count of hazard pointer required for the algorithm
+        static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointers required for the algorithm
 
         //@cond
         // Rebind traits (split-list support)
@@ -180,10 +185,14 @@ namespace cds { namespace intrusive {
             node_type *       pCur;   ///< Current node
 
             value_type *      pFound;       ///< Value of \p pCur->data, valid only if data found
-            value_type *      pPrevVal;     ///< Value of \p pPrev->data, can be \p nullptr
 
             typename gc::Guard guard;       ///< guard for \p pFound
-            typename gc::Guard prevGuard;   ///< guard for \p pPrevVal
+        };
+
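+        // Extends \p position for insertion paths: the value of pPrev->data is
+        // remembered and protected by its own guard so that the previous node
+        // can be safely marked and reused while linking a new item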
+        struct insert_position: public position
+        {
+            value_type *        pPrevVal;     ///< Value of \p pPrev->data, can be \p nullptr
+            typename gc::Guard  prevGuard;    ///< guard for \p pPrevVal
         };
         //@endcond
 
@@ -323,7 +332,7 @@ namespace cds { namespace intrusive {
             this code
             \code
                 if ( it1 == it2 )
-                    assert( &(*it1) == &(*it2) );
+                    assert( &(*it1) == &(*it2));
             \endcode
             can fire the assertion. The point is that the iterator stores the value of the element, which can be modified later by another thread.
             The guard inside the iterator prevents recycling that value, so the iterator's value remains valid even after such a change.
@@ -483,7 +492,7 @@ namespace cds { namespace intrusive {
         */
         std::pair<bool, bool> upsert( value_type& val, bool bInsert = true )
         {
-            return update_at( &m_Head, val, []( value_type&, value_type* ) {}, bInsert );
+            return upsert_at( &m_Head, val, bInsert );
         }
 
         /// Unlinks the item \p val from the list
@@ -821,7 +830,7 @@ namespace cds { namespace intrusive {
 
     protected:
         //@cond
-#if 0
+
         // split-list support
         bool insert_aux_node( node_type * pNode )
         {
@@ -832,25 +841,37 @@ namespace cds { namespace intrusive {
         bool insert_aux_node( node_type* pHead, node_type * pNode )
         {
             assert( pNode != nullptr );
+            assert( pNode->data.load( memory_model::memory_order_relaxed ) != nullptr );
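+
+            // An aux node always carries data (asserted above), so the regular
+            // search protocol applies; unlike insert_at(), link_aux_node() links
+            // the given node itself instead of allocating or reusing another node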
+
+            insert_position pos;
+
+            while ( true ) {
+                if ( inserting_search( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr(), pos, key_comparator())) {
+                    m_Stat.onInsertFailed();
+                    return false;
+                }
 
-            // Hack: convert node_type to value_type.
-            // In principle, auxiliary node can be non-reducible to value_type
-            // We assume that comparator can correctly distinguish aux and regular node.
-            return insert_at( pHead, *node_traits::to_value_ptr( pNode ) );
+                if ( link_aux_node( pNode, pos, pHead )) {
+                    ++m_ItemCounter;
+                    m_Stat.onInsertSuccess();
+                    return true;
+                }
+
+                m_Stat.onInsertRetry();
+            }
         }
-#endif
 
         bool insert_at( node_type* pHead, value_type& val )
         {
-            position pos;
+            insert_position pos;
 
             while ( true ) {
-                if ( search( pHead, val, pos, key_comparator() )) {
+                if ( inserting_search( pHead, val, pos, key_comparator())) {
                     m_Stat.onInsertFailed();
                     return false;
                 }
 
-                if ( link_data( &val, pos ) ) {
+                if ( link_data( &val, pos, pHead )) {
                     ++m_ItemCounter;
                     m_Stat.onInsertSuccess();
                     return true;
@@ -863,18 +884,18 @@ namespace cds { namespace intrusive {
         template <typename Func>
         bool insert_at( node_type* pHead, value_type& val, Func f )
         {
-            position pos;
+            insert_position pos;
 
             typename gc::Guard guard;
             guard.assign( &val );
 
             while ( true ) {
-                if ( search( pHead, val, pos, key_comparator() ) ) {
+                if ( inserting_search( pHead, val, pos, key_comparator())) {
                     m_Stat.onInsertFailed();
                     return false;
                 }
 
-                if ( link_data( &val, pos ) ) {
+                if ( link_data( &val, pos, pHead )) {
                     f( val );
                     ++m_ItemCounter;
                     m_Stat.onInsertSuccess();
@@ -888,20 +909,20 @@ namespace cds { namespace intrusive {
         template <typename Func>
         std::pair<bool, bool> update_at( node_type* pHead, value_type& val, Func func, bool bInsert )
         {
-            position pos;
+            insert_position pos;
 
             typename gc::Guard guard;
             guard.assign( &val );
 
             while ( true ) {
-                if ( search( pHead, val, pos, key_comparator() ) ) {
+                if ( inserting_search( pHead, val, pos, key_comparator())) {
                     // try to replace pCur->data with val
                     assert( pos.pFound != nullptr );
                     assert( key_comparator()(*pos.pFound, val) == 0 );
 
                     marked_data_ptr pFound( pos.pFound );
                     if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ),
-                            memory_model::memory_order_release, atomics::memory_order_relaxed ))) 
+                            memory_model::memory_order_release, atomics::memory_order_relaxed )))
                     {
                         if ( pos.pFound != &val ) {
                             retire_data( pos.pFound );
@@ -917,7 +938,7 @@ namespace cds { namespace intrusive {
                         return std::make_pair( false, false );
                     }
 
-                    if ( link_data( &val, pos )) {
+                    if ( link_data( &val, pos, pHead )) {
                         func( val, static_cast<value_type*>( nullptr ));
                         ++m_ItemCounter;
                         m_Stat.onUpdateNew();
@@ -929,6 +950,11 @@ namespace cds { namespace intrusive {
             }
         }
 
+        std::pair<bool, bool> upsert_at( node_type* pHead, value_type& val, bool bInsert )
+        {
+            return update_at( pHead, val, []( value_type&, value_type* ) {}, bInsert );
+        }
+
         bool unlink_at( node_type* pHead, value_type& val )
         {
             position pos;
@@ -1015,7 +1041,7 @@ namespace cds { namespace intrusive {
         bool find_at( node_type const* pHead, Q const& val, Compare cmp ) const
         {
             position pos;
-            if ( search( pHead, val, pos, cmp ) ) {
+            if ( search( pHead, val, pos, cmp )) {
                 m_Stat.onFindSuccess();
                 return true;
             }
@@ -1085,17 +1111,15 @@ namespace cds { namespace intrusive {
         {
             pos.pHead = pHead;
             node_type*  pPrev = const_cast<node_type*>( pHead );
-            value_type* pPrevVal = nullptr;
 
             while ( true ) {
                 node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed );
 
-                if ( pCur == pCur->next.load( memory_model::memory_order_relaxed )) {
+                if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) {
                     // end-of-list
                     pos.pPrev = pPrev;
                     pos.pCur = pCur;
                     pos.pFound = nullptr;
-                    pos.pPrevVal = pPrevVal;
                     return false;
                 }
 
@@ -1105,6 +1129,45 @@ namespace cds { namespace intrusive {
                         return p.ptr();
                     }).ptr();
 
+                if ( pVal ) {
+                    int const nCmp = cmp( *pVal, val );
+                    if ( nCmp >= 0 ) {
+                        pos.pPrev = pPrev;
+                        pos.pCur = pCur;
+                        pos.pFound = pVal;
+                        return nCmp == 0;
+                    }
+                }
+
+                pPrev = pCur;
+            }
+        }
+
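+        // Like search(), but fills insert_position: the value of pPrev->data is
+        // stored in pos.pPrevVal and protected by pos.prevGuard so that the
+        // link step can mark the previous node without it being reclaimed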
+        template <typename Q, typename Compare >
+        bool inserting_search( node_type const* pHead, Q const& val, insert_position& pos, Compare cmp ) const
+        {
+            pos.pHead = pHead;
+            node_type*  pPrev = const_cast<node_type*>( pHead );
+            value_type* pPrevVal = pPrev->data.load( memory_model::memory_order_relaxed ).ptr();
+
+            while ( true ) {
+                node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed );
+
+                if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) {
+                    // end-of-list
+                    pos.pPrev = pPrev;
+                    pos.pCur = pCur;
+                    pos.pFound = nullptr;
+                    pos.pPrevVal = pPrevVal;
+                    return false;
+                }
+
+                value_type * pVal = pos.guard.protect( pCur->data,
+                    []( marked_data_ptr p ) -> value_type* {
+                        return p.ptr();
+                    }).ptr();
+
                 if ( pVal ) {
                     int const nCmp = cmp( *pVal, val );
                     if ( nCmp >= 0 ) {
@@ -1121,6 +1184,26 @@ namespace cds { namespace intrusive {
                 pos.prevGuard.copy( pos.guard );
             }
         }
+
+        // split-list support
+        template <typename Predicate>
+        void destroy( Predicate pred )
+        {
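+            // Intended to be called at destruction time only, so relaxed loads
+            // are sufficient. A node is destroyed here if it is empty or if
+            // pred( pVal ) reports that its data belongs to this list;
+            // split-list auxiliary nodes fail the predicate and are left
+            // for their owner to reclaim.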
+            node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed );
+            while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) {
+                value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr();
+                node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
+                bool const is_regular_node = !pVal || pred( pVal );
+                if ( is_regular_node ) {
+                    if ( pVal )
+                        retire_data( pVal );
+                    delete_node( pNode );
+                }
+                pNode = pNext;
+            }
+
+            m_Head.next.store( &m_Tail, memory_model::memory_order_relaxed );
+        }
         //@endcond
 
     private:
@@ -1163,7 +1246,7 @@ namespace cds { namespace intrusive {
             }
         }
 
-        bool link_data( value_type * pVal, position& pos )
+        bool link_data( value_type* pVal, insert_position& pos, node_type* pHead )
         {
             assert( pos.pPrev != nullptr );
             assert( pos.pCur != nullptr );
@@ -1173,15 +1256,16 @@ namespace cds { namespace intrusive {
             // and then set it to another.
             // To prevent this we mark pos.pCur data as undeletable by setting LSB
             marked_data_ptr valCur( pos.pFound );
-            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
                 m_Stat.onNodeMarkFailed();
                 return false;
             }
 
             marked_data_ptr valPrev( pos.pPrevVal );
-            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
                 m_Stat.onNodeMarkFailed();
                 return false;
             }
@@ -1191,12 +1275,26 @@ namespace cds { namespace intrusive {
                 // sequence pPrev - pCur is broken
                 pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
                 pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
                 m_Stat.onNodeSeqBreak();
                 return false;
             }
 
-            if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr )
-            {
+            if ( pos.pPrevVal == nullptr ) {
+                // Check ABA-problem for prev
+                // There is a possibility that the current thread was preempted
+                // on entry to this function. Other threads can link data to prev
+                // and then remove it. As a result, the order of items may be changed
+                if ( find_prev( pHead, *pVal ) != pos.pPrev ) {
+                    pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+                    pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+                    m_Stat.onNullPrevABA();
+                    return false;
+                }
+            }
+
+            if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr ) {
                 // reuse pPrev
 
                 // Set pos.pPrev data if it is null
@@ -1234,18 +1332,105 @@ namespace cds { namespace intrusive {
             return false;
         }
 
+        // split-list support
+        bool link_aux_node( node_type * pNode, insert_position& pos, node_type* pHead )
+        {
+            assert( pos.pPrev != nullptr );
+            assert( pos.pCur != nullptr );
+
+            // We need pos.pCur data to stay unchanged, otherwise an ordering violation is possible
+            // if the current thread is preempted and another thread deletes pos.pCur data
+            // and then sets it to another value.
+            // To prevent this we mark pos.pCur data as undeletable by setting its LSB
+            marked_data_ptr valCur( pos.pFound );
+            if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
+                // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
+                m_Stat.onNodeMarkFailed();
+                return false;
+            }
+
+            marked_data_ptr valPrev( pos.pPrevVal );
+            if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
+                pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+                m_Stat.onNodeMarkFailed();
+                return false;
+            }
+
+            // checks if link pPrev -> pCur is broken
+            if ( pos.pPrev->next.load( memory_model::memory_order_acquire ) != pos.pCur ) {
+                // sequence pPrev - pCur is broken
+                pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+                pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+                m_Stat.onNodeSeqBreak();
+                return false;
+            }
+
+            if ( pos.pPrevVal == nullptr ) {
+                // Check ABA-problem for prev
+                // There is a possibility that the current thread was preempted
+                // on entry to this function. Other threads can insert (link) an item to prev
+                // and then remove it. As a result, the order of items may be changed
+                if ( find_prev( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr()) != pos.pPrev ) {
+                    pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+                    pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+                    m_Stat.onNullPrevABA();
+                    return false;
+                }
+            }
+
+            // insert new node between pos.pPrev and pos.pCur
+            pNode->next.store( pos.pCur, memory_model::memory_order_relaxed );
+
+            bool result = pos.pPrev->next.compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+            // Clears data marks
+            pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+            pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+            return result;
+        }
+
         static bool unlink_data( position& pos )
         {
             assert( pos.pCur != nullptr );
             assert( pos.pFound != nullptr );
 
             marked_data_ptr val( pos.pFound );
-            if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+            if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 retire_data( pos.pFound );
                 return true;
             }
             return false;
         }
+
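+        // Returns the node that should precede an item equal to or greater
+        // than val, i.e. the last node whose data compares less than val;
+        // used by link_data()/link_aux_node() to re-validate pos.pPrev
+        // when its data is null (ABA check)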
+        template <typename Q>
+        node_type* find_prev( node_type const* pHead, Q const& val ) const
+        {
+            node_type*  pPrev = const_cast<node_type*>( pHead );
+            typename gc::Guard guard;
+            key_comparator cmp;
+
+            while ( true ) {
+                node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed );
+
+                if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) {
+                    // end-of-list
+                    return pPrev;
+                }
+
+                value_type * pVal = guard.protect( pCur->data,
+                    []( marked_data_ptr p ) -> value_type* {
+                        return p.ptr();
+                    }).ptr();
+
+                if ( pVal && cmp( *pVal, val ) >= 0 )
+                    return pPrev;
+
+                pPrev = pCur;
+            }
+        }
         //@endcond
     };
 }} // namespace cds::intrusive