From: khizmax
Date: Sun, 12 Jul 2015 19:43:44 +0000 (+0300)
Subject: Fixed data races in EllenBinTree (TSan)
X-Git-Tag: v2.1.0~187
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=libcds.git;a=commitdiff_plain;h=6b0c3af9ca967532e87ab27a70b42e56abe8d23e

Fixed data races in EllenBinTree (TSan)
---

diff --git a/cds/intrusive/details/ellen_bintree_base.h b/cds/intrusive/details/ellen_bintree_base.h
index 54e928b7..a61f4f4c 100644
--- a/cds/intrusive/details/ellen_bintree_base.h
+++ b/cds/intrusive/details/ellen_bintree_base.h
@@ -89,7 +89,7 @@ namespace cds { namespace intrusive {
                 key_infinite = key_infinite1 | key_infinite2   ///< Cumulative infinite flags
             };
 
-            unsigned int    m_nFlags    ;   ///< Internal flags
+            atomics::atomic<unsigned int> m_nFlags;   ///< Internal flags
 
             /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node
             explicit basic_node( bool bInternal )
@@ -105,25 +105,26 @@ namespace cds { namespace intrusive {
             /// Checks if the node is internal
             bool is_internal() const
             {
-                return (m_nFlags & internal) != 0;
+                return (m_nFlags.load(atomics::memory_order_relaxed) & internal) != 0;
             }
 
             /// Returns infinite key, 0 if the node is not infinite
             unsigned int infinite_key() const
             {
-                return m_nFlags & key_infinite;
+                return m_nFlags.load(atomics::memory_order_relaxed) & key_infinite;
             }
 
             /// Sets infinite key for the node (for internal use only!!!)
             void infinite_key( int nInf )
             {
-                m_nFlags &= ~key_infinite;
+                unsigned int nFlags = m_nFlags.load(atomics::memory_order_relaxed);
+                nFlags &= ~key_infinite;
                 switch ( nInf ) {
                 case 1:
-                    m_nFlags |= key_infinite1;
+                    nFlags |= key_infinite1;
                     break;
                 case 2:
-                    m_nFlags |= key_infinite2;
+                    nFlags |= key_infinite2;
                     break;
                 case 0:
                     break;
@@ -131,6 +132,7 @@ namespace cds { namespace intrusive {
                     assert( false );
                     break;
                 }
+                m_nFlags.store( nFlags, atomics::memory_order_relaxed );
             }
         };
 
diff --git a/cds/intrusive/impl/ellen_bintree.h b/cds/intrusive/impl/ellen_bintree.h
index 1019d0c2..58c6a6ac 100644
--- a/cds/intrusive/impl/ellen_bintree.h
+++ b/cds/intrusive/impl/ellen_bintree.h
@@ -164,6 +164,8 @@ namespace cds { namespace intrusive {
         typedef typename traits::node_allocator        node_allocator;        ///< Allocator for internal node
         typedef typename traits::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator
 
+        static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 8; ///< Count of hazard pointer required for the algorithm
+
     protected:
         //@cond
         typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare;
@@ -179,9 +181,6 @@ namespace cds { namespace intrusive {
                 Guard_updGrandParent,
                 Guard_updParent,
 
-                // helping
-                Guard_helpLeaf,
-
                 // end of guard indices
                 guard_count
             };
@@ -204,11 +203,6 @@ namespace cds { namespace intrusive {
                 ,bRightLeaf( false )
                 ,bRightParent( false )
             {}
-
-            void clean_help_guards()
-            {
-                guards.clear( Guard_helpLeaf );
-            }
         };
         //@endcond
@@ -882,16 +876,15 @@ namespace cds { namespace intrusive {
             return false;
         }
 
-        tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent ) const
+        static tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent )
        {
-            tree_node * p;
-            tree_node * pn = bRight ? pParent->m_pRight.load( memory_model::memory_order_relaxed ) : pParent->m_pLeft.load( memory_model::memory_order_relaxed );
-            do {
-                p = pn;
-                res.guards.assign( search_result::Guard_Leaf, static_cast<internal_node *>( p ));
-                res.guards.assign( search_result::Guard_helpLeaf, node_traits::to_value_ptr( static_cast<leaf_node *>( p ) ));
-                pn = bRight ? pParent->m_pRight.load( memory_model::memory_order_acquire ) : pParent->m_pLeft.load( memory_model::memory_order_acquire );
-            } while ( p != pn );
+            tree_node * p = bRight
+                ? res.guards.protect( search_result::Guard_Leaf, pParent->m_pRight,
+                      []( tree_node * p ) -> internal_node* { return static_cast<internal_node *>( p );})
+                : res.guards.protect( search_result::Guard_Leaf, pParent->m_pLeft,
+                      []( tree_node * p ) -> internal_node* { return static_cast<internal_node *>( p );});
+            if ( p && p->is_leaf() )
+                res.guards.assign( search_result::Guard_Leaf, node_traits::to_value_ptr( static_cast<leaf_node *>( p )));
 
             // child node is guarded
             // See whether pParent->m_pUpdate has not been changed
@@ -899,22 +892,12 @@ namespace cds { namespace intrusive {
                 // update has been changed - returns nullptr as a flag to search retry
                 return nullptr;
             }
-
-            if ( p && p->is_leaf() )
-                res.guards.copy( search_result::Guard_Leaf, search_result::Guard_helpLeaf );
-            res.guards.clear( search_result::Guard_helpLeaf );
             return p;
         }
 
-        update_ptr search_protect_update( search_result& res, atomics::atomic<update_ptr> const& src ) const
+        static update_ptr search_protect_update( search_result& res, atomics::atomic<update_ptr> const& src )
         {
-            update_ptr ret;
-            update_ptr upd( src.load( memory_model::memory_order_relaxed ) );
-            do {
-                ret = upd;
-                res.guards.assign( search_result::Guard_updParent, upd );
-            } while ( ret != (upd = src.load( memory_model::memory_order_acquire )) );
-            return ret;
+            return res.guards.protect( search_result::Guard_updParent, src, [](update_ptr p) -> update_desc* { return p.ptr(); });
         }
 
         template
@@ -1179,21 +1162,11 @@ namespace cds { namespace intrusive {
             }
         }
 
-        tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic<tree_node *>& sibling )
+        static tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic<tree_node *>& sibling )
         {
-            typename gc::Guard guardLeaf;
-
-            tree_node * pSibling;
-            tree_node * p = sibling.load( memory_model::memory_order_relaxed );
-            do {
-                pSibling = p;
-                guard.assign( static_cast<internal_node *>(p) );
-                guardLeaf.assign( node_traits::to_value_ptr( static_cast<leaf_node *>(p)));
-            } while ( pSibling != ( p = sibling.load( memory_model::memory_order_acquire )) );
-
+            tree_node * pSibling = guard.protect( sibling, [](tree_node * p) -> internal_node* { return static_cast<internal_node *>(p); } );
             if ( pSibling->is_leaf() )
-                guard.copy( guardLeaf );
-
+                guard.assign( node_traits::to_value_ptr( static_cast<leaf_node *>( pSibling )));
             return pSibling;
         }
 
diff --git a/tools/tsan-suppression b/tools/tsan-suppression
index 45c24847..b784e967 100644
--- a/tools/tsan-suppression
+++ b/tools/tsan-suppression
@@ -7,5 +7,8 @@ race:cds::gc::details::retired_ptr::free
 # uRCU false positive
 race:cds::urcu::gc*::batch_retire*
 
+# EllenBinTree false positive
+race:ellen_bintree_pool::internal_node_allocator*::allocate
+
 # TODO: TSan false positive or library issues?
 race:cds::container::OptimisticQueue*::alloc_node
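
The ellen_bintree_base.h hunks change m_nFlags from a plain unsigned int to a relaxed atomic, so TSan no longer reports the concurrent plain reads in is_internal()/infinite_key() as data races; relaxed ordering is sufficient because the flags are fixed before the node is published to other threads. Below is a minimal standalone sketch of the same pattern, using std::atomic directly instead of the libcds atomics:: wrapper; the struct name and enum values are illustrative, not the library's definitions.

    // Sketch: flag bits kept in a relaxed std::atomic instead of a plain int.
    #include <atomic>
    #include <cassert>

    struct basic_node_sketch {
        enum flags {
            internal      = 1,
            key_infinite1 = 2,
            key_infinite2 = 4,
            key_infinite  = key_infinite1 | key_infinite2
        };

        std::atomic<unsigned int> m_nFlags{ 0 };

        bool is_internal() const
        {
            // Relaxed load: only the bit value matters, no ordering is implied.
            return ( m_nFlags.load( std::memory_order_relaxed ) & internal ) != 0;
        }

        // Load/modify/store rather than fetch_or: as in the patch, infinite keys
        // are assigned before the node becomes reachable, so there is no writer race.
        void infinite_key( int nInf )
        {
            unsigned int nFlags = m_nFlags.load( std::memory_order_relaxed );
            nFlags &= ~static_cast<unsigned int>( key_infinite );
            if ( nInf == 1 )
                nFlags |= key_infinite1;
            else if ( nInf == 2 )
                nFlags |= key_infinite2;
            else
                assert( nInf == 0 );
            m_nFlags.store( nFlags, std::memory_order_relaxed );
        }
    };

The atomic type removes the formal data race that TSan reports while compiling to essentially the same code as the plain int on mainstream architectures.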
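In ellen_bintree.h the hand-rolled "load, assign guard, re-load, compare" loops in protect_child_node(), search_protect_update() and protect_sibling() are replaced by the guard's protect() call, which encapsulates the hazard-pointer publication loop. The following is a standalone sketch of that loop under simplified assumptions: hazard_slot, publish() and protect() are hypothetical stand-ins for illustration, not the cds::gc::HP / gc::Guard API.

    // Sketch of the publish/re-check loop a hazard-pointer protect() performs.
    #include <atomic>

    struct node;

    struct hazard_slot {
        std::atomic<node*> hp{ nullptr };
        void publish( node* p ) { hp.store( p, std::memory_order_release ); }   // announce intent to use p
    };

    // Returns a pointer read from src that is published in the hazard slot
    // before the caller dereferences it.
    node* protect( hazard_slot& slot, std::atomic<node*>& src )
    {
        node* p = src.load( std::memory_order_relaxed );
        node* q;
        do {
            q = p;
            slot.publish( q );                              // make q visible to reclaimers
            p = src.load( std::memory_order_acquire );      // re-read; retry if the link moved
        } while ( p != q );
        return p;   // p is protected: its reclamation is deferred while published
    }

Because protect() only returns once the published pointer and the re-read pointer agree, the extra Guard_helpLeaf slot and the copy/clear bookkeeping in the old code become unnecessary, which is why the patch also drops that guard index.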