Fixed data races in EllenBinTree (TSan)
author    khizmax <libcds.dev@gmail.com>
Sun, 12 Jul 2015 19:43:44 +0000 (22:43 +0300)
committer khizmax <libcds.dev@gmail.com>
Sun, 12 Jul 2015 19:43:44 +0000 (22:43 +0300)
cds/intrusive/details/ellen_bintree_base.h
cds/intrusive/impl/ellen_bintree.h
tools/tsan-suppression

diff --git a/cds/intrusive/details/ellen_bintree_base.h b/cds/intrusive/details/ellen_bintree_base.h
index 54e928b768f13fdc537eb2d5e5ef039cf4469e26..a61f4f4c6bd214345b1437598ae6a9d903fb479a 100644
@@ -89,7 +89,7 @@ namespace cds { namespace intrusive {
                 key_infinite = key_infinite1 | key_infinite2    ///< Cumulative infinite flags
             };
 
-            unsigned int    m_nFlags    ;   ///< Internal flags
+            atomics::atomic<unsigned int> m_nFlags;   ///< Internal flags
 
             /// Constructs a leaf (bInternal == false) or internal (bInternal == true) node
             explicit basic_node( bool bInternal )
@@ -105,25 +105,26 @@ namespace cds { namespace intrusive {
             /// Checks if the node is internal
             bool is_internal() const
             {
-                return (m_nFlags & internal) != 0;
+                return (m_nFlags.load(atomics::memory_order_relaxed) & internal) != 0;
             }
 
             /// Returns infinite key, 0 if the node is not infinite
             unsigned int infinite_key() const
             {
-                return m_nFlags & key_infinite;
+                return m_nFlags.load(atomics::memory_order_relaxed) & key_infinite;
             }
 
             /// Sets infinite key for the node (for internal use only!!!)
             void infinite_key( int nInf )
             {
-                m_nFlags &= ~key_infinite;
+                unsigned int nFlags = m_nFlags.load(atomics::memory_order_relaxed);
+                nFlags &= ~key_infinite;
                 switch ( nInf ) {
                 case 1:
-                    m_nFlags |= key_infinite1;
+                    nFlags |= key_infinite1;
                     break;
                 case 2:
-                    m_nFlags |= key_infinite2;
+                    nFlags |= key_infinite2;
                     break;
                 case 0:
                     break;
@@ -131,6 +132,7 @@ namespace cds { namespace intrusive {
                     assert( false );
                     break;
                 }
+                m_nFlags.store( nFlags, atomics::memory_order_relaxed );
             }
         };
 
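The hunk above turns the plain flag word into an atomic: readers such as is_internal() and infinite_key() use relaxed loads, and infinite_key(int) does its read-modify-write on a local copy followed by a single relaxed store, so concurrent readers of the flag bits no longer constitute a data race under the C++11 memory model. A minimal self-contained sketch of the same pattern, using std::atomic instead of the library's atomics alias; the struct is hypothetical and the bit values are illustrative only (the actual values in ellen_bintree_base.h may differ):

#include <atomic>
#include <cassert>

// Sketch: flag bits kept in a relaxed std::atomic so concurrent readers
// (is_internal(), infinite_key()) never race with the writer.
struct node_flags {
    enum : unsigned int {
        internal      = 1 << 0,
        key_infinite1 = 1 << 1,
        key_infinite2 = 1 << 2,
        key_infinite  = key_infinite1 | key_infinite2
    };

    std::atomic<unsigned int> m_nFlags{ 0 };

    bool is_internal() const
    {
        // relaxed load: only the current bit pattern matters, no ordering is needed
        return ( m_nFlags.load( std::memory_order_relaxed ) & internal ) != 0;
    }

    void infinite_key( int nInf )
    {
        // read-modify-write on a local copy, then one relaxed store;
        // callers guarantee there is no concurrent writer
        unsigned int nFlags = m_nFlags.load( std::memory_order_relaxed );
        nFlags &= ~key_infinite;
        switch ( nInf ) {
        case 1:  nFlags |= key_infinite1; break;
        case 2:  nFlags |= key_infinite2; break;
        case 0:  break;
        default: assert( false ); break;
        }
        m_nFlags.store( nFlags, std::memory_order_relaxed );
    }
};

Relaxed ordering keeps the fix free of extra synchronization; making the access atomic is what removes the data race that TSan reported.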
diff --git a/cds/intrusive/impl/ellen_bintree.h b/cds/intrusive/impl/ellen_bintree.h
index 1019d0c2c4412d65b3396cce7827603977e2bf26..58c6a6acbc694c97851a53091241be064a9679c5 100644
@@ -164,6 +164,8 @@ namespace cds { namespace intrusive {
         typedef typename traits::node_allocator        node_allocator;        ///< Allocator for internal node
         typedef typename traits::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator
 
+        static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 8; ///< Count of hazard pointers required for the algorithm
+
     protected:
         //@cond
         typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare;
@@ -179,9 +181,6 @@ namespace cds { namespace intrusive {
                 Guard_updGrandParent,
                 Guard_updParent,
 
-                // helping
-                Guard_helpLeaf,
-
                 // end of guard indices
                 guard_count
             };
@@ -204,11 +203,6 @@ namespace cds { namespace intrusive {
                 ,bRightLeaf( false )
                 ,bRightParent( false )
             {}
-
-            void clean_help_guards()
-            {
-                guards.clear( Guard_helpLeaf );
-            }
         };
         //@endcond
 
@@ -882,16 +876,15 @@ namespace cds { namespace intrusive {
             return false;
         }
 
-        tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent ) const
+        static tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent )
         {
-            tree_node * p;
-            tree_node * pn = bRight ? pParent->m_pRight.load( memory_model::memory_order_relaxed ) : pParent->m_pLeft.load( memory_model::memory_order_relaxed );
-            do {
-                p = pn;
-                res.guards.assign( search_result::Guard_Leaf, static_cast<internal_node *>( p ));
-                res.guards.assign( search_result::Guard_helpLeaf, node_traits::to_value_ptr( static_cast<leaf_node *>( p ) ));
-                pn = bRight ? pParent->m_pRight.load( memory_model::memory_order_acquire ) : pParent->m_pLeft.load( memory_model::memory_order_acquire );
-            } while ( p != pn );
+            tree_node * p = bRight
+                ? res.guards.protect( search_result::Guard_Leaf, pParent->m_pRight,
+                                                []( tree_node * p ) -> internal_node* { return static_cast<internal_node *>(p);})
+                : res.guards.protect( search_result::Guard_Leaf, pParent->m_pLeft,
+                                                []( tree_node * p ) -> internal_node* { return static_cast<internal_node *>(p);});
+            if ( p && p->is_leaf() )
+                res.guards.assign( search_result::Guard_Leaf, node_traits::to_value_ptr( static_cast<leaf_node *>( p )));
 
             // child node is guarded
             // See whether pParent->m_pUpdate has not been changed
@@ -899,22 +892,12 @@ namespace cds { namespace intrusive {
                 // update has been changed - returns nullptr as a flag to search retry
                 return nullptr;
             }
-
-            if ( p && p->is_leaf() )
-                res.guards.copy( search_result::Guard_Leaf, search_result::Guard_helpLeaf );
-            res.guards.clear( search_result::Guard_helpLeaf );
             return p;
         }
 
-        update_ptr search_protect_update( search_result& res, atomics::atomic<update_ptr> const& src ) const
+        static update_ptr search_protect_update( search_result& res, atomics::atomic<update_ptr> const& src )
         {
-            update_ptr ret;
-            update_ptr upd( src.load( memory_model::memory_order_relaxed ) );
-            do {
-                ret = upd;
-                res.guards.assign( search_result::Guard_updParent, upd );
-            } while ( ret != (upd = src.load( memory_model::memory_order_acquire )) );
-            return ret;
+            return res.guards.protect( search_result::Guard_updParent, src, [](update_ptr p) -> update_desc* { return p.ptr(); });
         }
 
         template <typename KeyValue, typename Compare>
@@ -1179,21 +1162,11 @@ namespace cds { namespace intrusive {
             }
         }
 
-        tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic<tree_node *>& sibling )
+        static tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic<tree_node *>& sibling )
         {
-            typename gc::Guard guardLeaf;
-
-            tree_node * pSibling;
-            tree_node * p = sibling.load( memory_model::memory_order_relaxed );
-            do {
-                pSibling = p;
-                guard.assign( static_cast<internal_node *>(p) );
-                guardLeaf.assign( node_traits::to_value_ptr( static_cast<leaf_node *>(p)));
-            } while ( pSibling != ( p = sibling.load( memory_model::memory_order_acquire )) );
-
+            tree_node * pSibling = guard.protect( sibling, [](tree_node * p) -> internal_node* { return static_cast<internal_node *>(p); } );
             if ( pSibling->is_leaf() )
-                guard.copy( guardLeaf );
-
+                guard.assign( node_traits::to_value_ptr( static_cast<leaf_node *>( pSibling )));
             return pSibling;
         }
 
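Throughout this file the hand-rolled protection loops (load the pointer, publish it to a guard, re-read until the value is stable) are replaced by the guard's protect() helper, which performs the same loop internally and takes a functor to convert the raw pointer before publishing it. For reference, a minimal sketch of the loop that protect() encapsulates, written against plain std::atomic with a hypothetical single-slot guard standing in for the Hazard Pointer guard:

#include <atomic>

// Hypothetical single-slot hazard-pointer guard; stands in for gc::Guard here.
struct hp_guard {
    std::atomic<void*> slot{ nullptr };
    void publish( void* p ) { slot.store( p, std::memory_order_release ); }
};

// Publish-and-recheck loop: announce the candidate pointer, then re-read the
// source; if it changed, the announced value may already have been retired,
// so retry with the new one.
template <typename T, typename ToGuarded>
T* protect( hp_guard& g, std::atomic<T*>& src, ToGuarded to_guarded )
{
    T* pCur = src.load( std::memory_order_relaxed );
    T* pRet;
    do {
        pRet = pCur;
        g.publish( to_guarded( pCur ) );               // hazard pointer is now visible
        pCur = src.load( std::memory_order_acquire );  // re-check the source
    } while ( pRet != pCur );
    return pCur;
}

In protect_sibling() and protect_child_node() above, the functor casts the tree_node* to internal_node* for the guard, and a follow-up assign() re-publishes the value pointer when the node turns out to be a leaf, mirroring what the removed loops did with the extra helper guard.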
diff --git a/tools/tsan-suppression b/tools/tsan-suppression
index 45c24847511e5851297d5c9db858264957a36c84..b784e967b260e47aee8fb7a2f82da04caffd21d9 100644
@@ -7,5 +7,8 @@ race:cds::gc::details::retired_ptr::free
 # uRCU false positive
 race:cds::urcu::gc*::batch_retire*
 
+# EllenBinTree false positive
+race:ellen_bintree_pool::internal_node_allocator*::allocate
+
 # TODO: TSan false positive or library issues?
 race:cds::container::OptimisticQueue*::alloc_node
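The new entry covers a report in the EllenBinTree internal-node pool allocator that the commit treats as a TSan false positive rather than a real race. Suppression files in this format are read by ThreadSanitizer at run time, typically via something like TSAN_OPTIONS="suppressions=tools/tsan-suppression" ./test-binary (the binary name here is only an example).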