From 190963414ab1cf78a26b636495530e064a4d980e Mon Sep 17 00:00:00 2001
From: khizmax
Date: Wed, 6 May 2015 22:12:34 +0300
Subject: [PATCH] TSan exam:
 - annotations have been moved to the allocator wrapper
 - fixed data races for some containers
 - improved Treiber's stack elimination algo

---
 cds/compiler/feature_tsan.h                |  8 ++-
 cds/container/fcqueue.h                    |  9 +++
 cds/container/fcstack.h                    |  1 +
 cds/container/lazy_kvlist_rcu.h            | 15 +----
 cds/container/msqueue.h                    |  3 -
 cds/container/split_list_set_rcu.h         | 12 +---
 cds/details/allocator.h                    | 78 ++++++++++++++++++++--
 cds/intrusive/basket_queue.h               | 14 ++--
 cds/intrusive/details/ellen_bintree_base.h | 13 ++--
 cds/intrusive/fcqueue.h                    |  1 -
 cds/intrusive/fcstack.h                    |  1 -
 cds/intrusive/impl/ellen_bintree.h         |  6 ++
 cds/intrusive/msqueue.h                    |  6 +-
 cds/intrusive/optimistic_queue.h           |  2 +-
 cds/intrusive/segmented_queue.h            | 11 +--
 cds/intrusive/split_list_rcu.h             |  7 +-
 cds/intrusive/treiber_stack.h              | 27 ++++++--
 cds/memory/michael/allocator.h             | 13 +++-
 cds/opt/buffer.h                           |  4 +-
 tests/unit/pqueue/push_pop.cpp             |  8 +--
 20 files changed, 160 insertions(+), 79 deletions(-)

diff --git a/cds/compiler/feature_tsan.h b/cds/compiler/feature_tsan.h
index cecbadc5..1a05a17d 100644
--- a/cds/compiler/feature_tsan.h
+++ b/cds/compiler/feature_tsan.h
@@ -6,6 +6,8 @@
 // Thread Sanitizer annotations.
 // From https://groups.google.com/d/msg/thread-sanitizer/SsrHB7FTnTk/mNTGNLQj-9cJ
 
+//@cond
+
 #ifdef CDS_THREAD_SANITIZER_ENABLED
 #   define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)   AnnotateHappensBefore(__FILE__, __LINE__, reinterpret_cast<void *>(addr))
 #   define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)    AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast<void *>(addr))
@@ -31,7 +33,9 @@
         void AnnotateIgnoreWritesBegin(const char *f, int l);
         void AnnotateIgnoreWritesEnd(const char *f, int l);
     }
-#else
+
+#else // CDS_THREAD_SANITIZER_ENABLED
+
 
 #   define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
 #   define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)
@@ -41,6 +45,8 @@
 #   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END
 #   define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN
 #   define CDS_TSAN_ANNOTATE_IGNORE_RW_END
+
 #endif
+//@endcond
 
 #endif  // #ifndef CDSLIB_COMPILER_FEATURE_TSAN_H

diff --git a/cds/container/fcqueue.h b/cds/container/fcqueue.h
index bc7b684b..fedf97ad 100644
--- a/cds/container/fcqueue.h
+++ b/cds/container/fcqueue.h
@@ -290,6 +290,9 @@ namespace cds { namespace container {
         {
             assert( pRec );
 
+            // this function is called under FC mutex, so switch TSan off
+            CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             switch ( pRec->op() ) {
             case op_enq:
                 assert( pRec->pValEnq );
@@ -315,12 +318,17 @@ namespace cds { namespace container {
                 assert(false);
                 break;
             }
+            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
 
         /// Batch-processing flat combining
        void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd )
         {
             typedef typename fc_kernel::iterator fc_iterator;
+
+            // this function is called under FC mutex, so switch TSan off
+            CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
                 switch ( it->op() ) {
                 case op_enq:
@@ -335,6 +343,7 @@ namespace cds { namespace container {
                     break;
                 }
             }
+            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
         //@endcond
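
Note: the two fcqueue hunks above follow one rule: fc_apply() and fc_process() run only while the combiner owns the flat-combining mutex, so every race TSan reports inside them is a false positive. A minimal sketch of that bracketing discipline, not code from this patch (apply_under_fc_lock is a hypothetical stand-in for the real combiner entry points; the macros are the ones defined in feature_tsan.h above):

    #include <mutex>
    #include <cds/compiler/feature_tsan.h>  // in the library this arrives via cds/details/defs.h

    std::mutex fc_mutex;    // stands in for the flat-combining kernel's lock

    template <typename Fn>
    void apply_under_fc_lock( Fn fn )
    {
        std::lock_guard<std::mutex> lock( fc_mutex );
        // The combiner touches publication records owned by other threads.
        // The FC protocol guarantees mutual exclusion, but TSan cannot see
        // that, so reads and writes are hidden from the checker here.
        CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
        fn();
        CDS_TSAN_ANNOTATE_IGNORE_RW_END;
    }

The END annotation must be reached on every exit path, which is why fc_apply() places it after the whole switch rather than inside the cases.
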
diff --git a/cds/container/fcstack.h b/cds/container/fcstack.h
index 0f6aeb4d..7ef2164e 100644
--- a/cds/container/fcstack.h
+++ b/cds/container/fcstack.h
@@ -304,6 +304,7 @@ namespace cds { namespace container {
         {
             // this function is called under FC mutex, so switch TSan off
             CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             typedef typename fc_kernel::iterator fc_iterator;
             for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
                 switch ( it->op() ) {

diff --git a/cds/container/lazy_kvlist_rcu.h b/cds/container/lazy_kvlist_rcu.h
index 1ead8ccc..de24d131 100644
--- a/cds/container/lazy_kvlist_rcu.h
+++ b/cds/container/lazy_kvlist_rcu.h
@@ -130,28 +130,19 @@ namespace cds { namespace container {
         template <typename K>
         static node_type * alloc_node(const K& key)
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_allocator().New( key );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_allocator().New( key );
         }
 
         template <typename K, typename V>
         static node_type * alloc_node( const K& key, const V& val )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_allocator().New( key, val );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_allocator().New( key, val );
         }
 
         template <typename... Args>
         static node_type * alloc_node( Args&&... args )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_allocator().MoveNew( std::forward<Args>(args)... );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_allocator().MoveNew( std::forward<Args>(args)... );
         }
 
         static void free_node( node_type * pNode )

diff --git a/cds/container/msqueue.h b/cds/container/msqueue.h
index 3292ff91..1d355e97 100644
--- a/cds/container/msqueue.h
+++ b/cds/container/msqueue.h
@@ -117,10 +117,7 @@ namespace cds { namespace container {
         {
             void operator ()( node_type * pNode )
             {
-                // TSan false positive possible
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 cxx_allocator().Delete( pNode );
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
         };

diff --git a/cds/container/split_list_set_rcu.h b/cds/container/split_list_set_rcu.h
index 7ad24be8..f6caeb73 100644
--- a/cds/container/split_list_set_rcu.h
+++ b/cds/container/split_list_set_rcu.h
@@ -226,26 +226,18 @@ namespace cds { namespace container {
         template <typename Q>
         static node_type * alloc_node( Q const& v )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_node_allocator().New( v );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_node_allocator().New( v );
         }
 
         template <typename... Args>
         static node_type * alloc_node( Args&&... args )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_node_allocator().MoveNew( std::forward<Args>(args)...);
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_node_allocator().MoveNew( std::forward<Args>(args)...);
         }
 
         static void free_node( node_type * pNode )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             cxx_node_allocator().Delete( pNode );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
         }
 
         struct node_disposer {
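
Note: the deletions in lazy_kvlist_rcu.h, msqueue.h, and split_list_set_rcu.h above all reduce the node factories to plain delegation; the TSan handling they used to carry now lives in cds::details::Allocator, patched next. A sketch of the shape the container side converges on (hypothetical my_node payload; assumes the default std::allocator backend):

    #include <cds/details/allocator.h>

    struct my_node { int value; };
    typedef cds::details::Allocator< my_node > node_allocator;

    static my_node * alloc_node()
    {
        return node_allocator().New();     // TSan suppression now happens inside New()
    }

    static void free_node( my_node * p )
    {
        node_allocator().Delete( p );      // ...and inside Delete()
    }
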
diff --git a/cds/details/allocator.h b/cds/details/allocator.h
index e37a16a2..cbc634c0 100644
--- a/cds/details/allocator.h
+++ b/cds/details/allocator.h
@@ -32,6 +32,9 @@ namespace cds {
             , typename Alloc::template rebind<T>::other
         >::type allocator_type;
 
+        /// \p true if underlying allocator is \p std::allocator, \p false otherwise
+        static CDS_CONSTEXPR bool const c_bStdAllocator = std::is_same< allocator_type, std::allocator<T>>::value;
+
         /// Element type
         typedef T value_type;
 
@@ -39,20 +42,52 @@ namespace cds {
         template <typename... S>
         value_type * New( S const&... src )
         {
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            }
+            value_type * pv = Construct( allocator_type::allocate(1), src... );
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            }
+            return pv;
+#       else
             return Construct( allocator_type::allocate(1), src... );
+#       endif
         }
 
         /// Analogue of operator new T( std::forward<Args>(args)... ) (move semantics)
         template <typename... Args>
         value_type * MoveNew( Args&&... args )
         {
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            }
+            value_type * pv = MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            }
+            return pv;
+#       else
             return MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+#       endif
         }
 
         /// Analogue of operator new T[\p nCount ]
         value_type * NewArray( size_t nCount )
         {
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            }
+#       endif
             value_type * p = allocator_type::allocate( nCount );
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            }
+#       endif
             for ( size_t i = 0; i < nCount; ++i )
                 Construct( p + i );
             return p;
@@ -65,7 +100,17 @@ namespace cds {
         template <typename S>
         value_type * NewArray( size_t nCount, S const& src )
         {
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            }
+#       endif
             value_type * p = allocator_type::allocate( nCount );
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            }
+#       endif
             for ( size_t i = 0; i < nCount; ++i )
                 Construct( p + i, src );
             return p;
@@ -95,16 +140,22 @@ namespace cds {
         /// Analogue of operator delete
         void Delete( value_type * p )
         {
+            // TSan false positive possible
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             allocator_type::destroy( p );
             allocator_type::deallocate( p, 1 );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
         }
 
         /// Analogue of operator delete []
         void Delete( value_type * p, size_t nCount )
         {
+            // TSan false positive possible
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             for ( size_t i = 0; i < nCount; ++i )
                 allocator_type::destroy( p + i );
             allocator_type::deallocate( p, nCount );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
         }
 
 # if CDS_COMPILER == CDS_COMPILER_INTEL
@@ -119,14 +170,22 @@ namespace cds {
         template <typename... S>
         value_type * Construct( void * p, S const&... src )
         {
-            return new( p ) value_type( src... );
+            // TSan false positive possible
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            value_type * pv = new( p ) value_type( src... );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            return pv;
         }
 
         /// Analogue of placement operator new( p ) T( std::forward<Args>(args)... )
         template <typename... Args>
         value_type * MoveConstruct( void * p, Args&&... args )
         {
-            return new( p ) value_type( std::forward<Args>(args)... );
+            // TSan false positive possible
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            value_type * pv = new( p ) value_type( std::forward<Args>(args)... );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            return pv;
         }
 
         /// Rebinds allocator to other type \p Q instead of \p T
@@ -143,7 +202,18 @@ namespace cds {
             size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *);
             typedef typename allocator_type::template rebind< void * >::other void_allocator;
-            return void_allocator().allocate( nPtrSize );
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            }
+#       endif
+            void * p = void_allocator().allocate( nPtrSize );
+#       if CDS_THREAD_SANITIZER_ENABLED
+            if ( c_bStdAllocator ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            }
+#       endif
+            return p;
         }
         //@endcond
     };
@@ -163,7 +233,7 @@ namespace cds {
         */
         static void free( T * p )
         {
-            Allocator<T, Alloc> a;
+            Allocator<T, Alloc> a;
             a.Delete( p );
         }
     };
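
Note: the c_bStdAllocator gate above is the crux of the patch: suppression is applied only when the backend is std::allocator, whose internals TSan cannot model, while a user-supplied allocator keeps full instrumentation, since it may contain synchronization of its own that must stay visible. A condensed model of New(), not the actual implementation (uses the CDS_TSAN_* macros and the CDS_THREAD_SANITIZER_ENABLED switch from the headers above):

    #include <memory>
    #include <new>
    #include <type_traits>

    template <typename T, typename Alloc = std::allocator<T>>
    T * annotated_new()
    {
        static constexpr bool is_std_alloc =
            std::is_same< Alloc, std::allocator<T> >::value;

    # if CDS_THREAD_SANITIZER_ENABLED
        if ( is_std_alloc ) {
            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
        }
    # endif
        T * p = new( Alloc().allocate( 1 )) T();   // writes hidden only for std::allocator
    # if CDS_THREAD_SANITIZER_ENABLED
        if ( is_std_alloc ) {
            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
        }
    # endif
        return p;
    }

The branch on a compile-time constant costs nothing in practice; the compiler folds it away in both configurations.
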
diff --git a/cds/intrusive/basket_queue.h b/cds/intrusive/basket_queue.h
index 4c26872b..f5d129a9 100644
--- a/cds/intrusive/basket_queue.h
+++ b/cds/intrusive/basket_queue.h
@@ -514,7 +514,7 @@ namespace cds { namespace intrusive {
                         }
                     }
 
-                    m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed );
+                    m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed );
                 }
                 else {
                     marked_ptr iter( h );
@@ -537,7 +537,7 @@ namespace cds { namespace intrusive {
                     else if ( bDeque ) {
                         res.pNext = pNext.ptr();
 
-                        if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
+                        if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
                             if ( hops >= m_nMaxHops )
                                 free_chain( h, pNext );
                             break;
@@ -565,7 +565,7 @@ namespace cds { namespace intrusive {
         {
             // "head" and "newHead" are guarded
 
-            if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+            if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed ))
             {
                 typename gc::template GuardArray<2> guards;
                 guards.assign( 0, node_traits::to_value_ptr(head.ptr()) );
@@ -659,8 +659,8 @@ namespace cds { namespace intrusive {
 
                 if ( pNext.ptr() == nullptr ) {
                     pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
-                    if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
-                        if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+                    if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
+                        if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                             m_Stat.onAdvanceTailFailed();
                         break;
                     }
@@ -681,7 +681,7 @@ namespace cds { namespace intrusive {
                     {
                         bkoff();
                         pNew->m_pNext.store( pNext, memory_model::memory_order_release );
-                        if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) {
+                        if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                             m_Stat.onAddBasket();
                             break;
                         }
@@ -713,7 +713,7 @@ namespace cds { namespace intrusive {
                             pNext = p;
                             g.assign( 0, g.template get<value_type>( 1 ) );
                         }
-                        if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+                        if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                             m_Stat.onAdvanceTailFailed();
 
                         m_Stat.onBadTail();
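
Note on the memory-order changes above: C++11 compare_exchange takes separate success and failure orders, a failed CAS performs only a load (so relaxed is always enough there), and a dequeue-side CAS wants acquire on success because the winner immediately dereferences the node it took. A self-contained sketch of that pattern (illustrative head/next types, not the basket-queue code):

    #include <atomic>

    struct node { std::atomic<node *> next; };
    std::atomic<node *> head;

    bool pop_head( node *& h )
    {
        h = head.load( std::memory_order_acquire );
        node * next = h ? h->next.load( std::memory_order_acquire ) : nullptr;
        // success: acquire, because the caller goes on to read fields of *h;
        // failure: a mere reload of head, so relaxed suffices
        return h && head.compare_exchange_strong( h, next,
                                                  std::memory_order_acquire,
                                                  std::memory_order_relaxed );
    }
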
diff --git a/cds/intrusive/details/ellen_bintree_base.h b/cds/intrusive/details/ellen_bintree_base.h
index 54e928b7..1bb08e1b 100644
--- a/cds/intrusive/details/ellen_bintree_base.h
+++ b/cds/intrusive/details/ellen_bintree_base.h
@@ -89,7 +89,7 @@ namespace cds { namespace intrusive {
                 key_infinite = key_infinite1 | key_infinite2    ///< Cumulative infinite flags
             };
 
-            unsigned int    m_nFlags    ;   ///< Internal flags
+            atomics::atomic<unsigned int>   m_nFlags    ;   ///< Internal flags
 
             /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node
             explicit basic_node( bool bInternal )
@@ -105,25 +105,26 @@ namespace cds { namespace intrusive {
             /// Checks if the node is internal
             bool is_internal() const
             {
-                return (m_nFlags & internal) != 0;
+                return (m_nFlags.load( atomics::memory_order_relaxed ) & internal) != 0;
             }
 
             /// Returns infinite key, 0 if the node is not infinite
             unsigned int infinite_key() const
             {
-                return m_nFlags & key_infinite;
+                return m_nFlags.load( atomics::memory_order_relaxed ) & key_infinite;
             }
 
             /// Sets infinite key for the node (for internal use only!!!)
             void infinite_key( int nInf )
             {
-                m_nFlags &= ~key_infinite;
+                const unsigned int nFlags = m_nFlags.load( atomics::memory_order_relaxed ) & ~key_infinite;
+                m_nFlags.store( nFlags, atomics::memory_order_relaxed );
                 switch ( nInf ) {
                 case 1:
-                    m_nFlags |= key_infinite1;
+                    m_nFlags.store( nFlags | key_infinite1, atomics::memory_order_relaxed );
                     break;
                 case 2:
-                    m_nFlags |= key_infinite2;
+                    m_nFlags.store( nFlags | key_infinite2, atomics::memory_order_relaxed );
                     break;
                 case 0:
                     break;

diff --git a/cds/intrusive/fcqueue.h b/cds/intrusive/fcqueue.h
index 86287365..630b2ae3 100644
--- a/cds/intrusive/fcqueue.h
+++ b/cds/intrusive/fcqueue.h
@@ -308,7 +308,6 @@ namespace cds { namespace intrusive {
                     break;
                 }
             }
-            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
 
         //@endcond

diff --git a/cds/intrusive/fcstack.h b/cds/intrusive/fcstack.h
index 3bdf1242..57f9c293 100644
--- a/cds/intrusive/fcstack.h
+++ b/cds/intrusive/fcstack.h
@@ -292,7 +292,6 @@ namespace cds { namespace intrusive {
                     break;
                 }
             }
-            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
 
         //@endcond
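
Note: the flag word above becomes an atomic accessed with relaxed ordering. The bits are written before the node is published and are effectively immutable afterwards, so no fences are needed; the atomic type merely tells TSan that the concurrent reads are intentional. A condensed model of the idea (illustrative flag values, not the ellen_bintree node):

    #include <atomic>

    enum : unsigned int { internal = 4, key_infinite1 = 1, key_infinite2 = 2 };

    struct flag_word {
        std::atomic<unsigned int> m_nFlags { 0 };

        bool is_internal() const
        {
            // relaxed: the value does not change once the node is published
            return ( m_nFlags.load( std::memory_order_relaxed ) & internal ) != 0;
        }

        void set_infinite( int nInf )   // called only before publication
        {
            unsigned int f = m_nFlags.load( std::memory_order_relaxed )
                           & ~( key_infinite1 | key_infinite2 );
            if ( nInf == 1 )        f |= key_infinite1;
            else if ( nInf == 2 )   f |= key_infinite2;
            m_nFlags.store( f, std::memory_order_relaxed );
        }
    };
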
diff --git a/cds/intrusive/impl/ellen_bintree.h b/cds/intrusive/impl/ellen_bintree.h
index a6fcd1a2..a3ae991b 100644
--- a/cds/intrusive/impl/ellen_bintree.h
+++ b/cds/intrusive/impl/ellen_bintree.h
@@ -1236,7 +1236,10 @@ namespace cds { namespace intrusive {
             if ( res.pGrandParent ) {
                 assert( !res.pLeaf->infinite_key() );
                 pNewInternal->infinite_key( 0 );
+                // TSan false positive: there is the release fence below, pNewInternal is not linked yet
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 key_extractor()(pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf ));
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
             else {
                 assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 );
@@ -1249,7 +1252,10 @@ namespace cds { namespace intrusive {
             assert( !res.pLeaf->is_internal() );
             pNewInternal->infinite_key( 0 );
 
+            // TSan false positive: there is the release fence below, pNewInternal is not linked yet
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             key_extractor()(pNewInternal->m_Key, val);
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             pNewInternal->m_pLeft.store( static_cast<tree_node *>(res.pLeaf), memory_model::memory_order_relaxed );
             pNewInternal->m_pRight.store( static_cast<tree_node *>(pNewLeaf), memory_model::memory_order_release );
             assert( !res.pLeaf->infinite_key() );

diff --git a/cds/intrusive/msqueue.h b/cds/intrusive/msqueue.h
index c56d4b45..404cd189 100644
--- a/cds/intrusive/msqueue.h
+++ b/cds/intrusive/msqueue.h
@@ -372,7 +372,7 @@ namespace cds { namespace intrusive {
             node_type * h;
             while ( true ) {
                 h = res.guards.protect( 0, m_pHead, node_to_value() );
-                pNext = h->m_pNext.load( memory_model::memory_order_relaxed );
+                pNext = h->m_pNext.load( memory_model::memory_order_acquire );
                 res.guards.assign( 1, node_to_value()( pNext ));
                 if ( m_pHead.load(memory_model::memory_order_acquire) != h )
                     continue;
@@ -390,7 +390,7 @@ namespace cds { namespace intrusive {
                     continue;
                 }
 
-                if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ))
+                if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                     break;
 
                 m_Stat.onDequeueRace();
@@ -499,7 +499,7 @@ namespace cds { namespace intrusive {
             ++m_ItemCounter;
             m_Stat.onEnqueue();
 
-            if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
+            if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                 m_Stat.onAdvanceTailFailed();
             return true;
         }

diff --git a/cds/intrusive/optimistic_queue.h b/cds/intrusive/optimistic_queue.h
index b7b6601e..0f98f75f 100644
--- a/cds/intrusive/optimistic_queue.h
+++ b/cds/intrusive/optimistic_queue.h
@@ -473,7 +473,7 @@ namespace cds { namespace intrusive {
                     fix_list( pTail, pHead );
                     continue;
                 }
-                if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+                if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                     // dequeue success
                     break;
                 }

diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h
index b3f244c4..a84948a9 100644
--- a/cds/intrusive/segmented_queue.h
+++ b/cds/intrusive/segmented_queue.h
@@ -411,21 +411,12 @@ namespace cds { namespace intrusive {
 
             segment * allocate_segment()
             {
-                // TSan: release barrier will be issued when the segment will link to the list of segments
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-                segment * p = segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
-                    quasi_factor() );
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-                return p;
+                return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor() );
             }
 
             static void free_segment( segment * pSegment )
             {
-                // TSan: deallocating is called inside SMR reclamation cycle
-                // so necessary barriers have been already issued
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 segment_allocator().Delete( pSegment );
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
 
             static void retire_segment( segment * pSegment )
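
Note: the comments deleted above still state the invariant that makes the deletion safe: a fresh segment is filled while it is thread-private and becomes visible only through a release operation when it is linked into the segment list, and deallocation runs inside the SMR reclamation cycle after all readers are gone. A sketch of the publication half (illustrative types; the real queue links segments with a CAS loop, not a plain store):

    #include <atomic>

    struct segment { segment * next; unsigned data; };
    std::atomic<segment *> segment_list_head { nullptr };

    void publish_segment( segment * s )
    {
        s->data = 42;   // plain writes: the segment is still thread-private
        s->next = segment_list_head.load( std::memory_order_relaxed );
        // the release store publishes all prior writes along with the pointer
        segment_list_head.store( s, std::memory_order_release );
    }
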
diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h
index 0ef8a765..3ba40492 100644
--- a/cds/intrusive/split_list_rcu.h
+++ b/cds/intrusive/split_list_rcu.h
@@ -255,16 +255,11 @@ namespace cds { namespace intrusive {
         dummy_node_type * alloc_dummy_node( size_t nHash )
         {
             m_Stat.onHeadNodeAllocated();
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            dummy_node_type * p = dummy_node_allocator().New( nHash );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return dummy_node_allocator().New( nHash );
         }
         void free_dummy_node( dummy_node_type * p )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             dummy_node_allocator().Delete( p );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             m_Stat.onHeadNodeFreed();
         }
 

diff --git a/cds/intrusive/treiber_stack.h b/cds/intrusive/treiber_stack.h
index b2347f45..74ca175c 100644
--- a/cds/intrusive/treiber_stack.h
+++ b/cds/intrusive/treiber_stack.h
@@ -77,8 +77,9 @@ namespace cds { namespace intrusive {
 
             operation()
                 : pVal( nullptr )
-                , nStatus(0)
-            {}
+            {
+                nStatus.store( 0 /*op_free*/, atomics::memory_order_release );
+            }
         };
         //@endcond
 
@@ -329,8 +330,8 @@ namespace cds { namespace intrusive {
         /// Elimination back-off data
         struct elimination_data {
-            elimination_random_engine   randEngine; ///< random engine
-            collision_array             collisions; ///< collision array
+            mutable elimination_random_engine   randEngine; ///< random engine
+            collision_array                     collisions; ///< collision array
 
             elimination_data()
             {
@@ -351,6 +352,18 @@ namespace cds { namespace intrusive {
 
             typedef std::unique_lock< elimination_lock_type > slot_scoped_lock;
 
+            template <bool Exp2 = collision_array::c_bExp2>
+            typename std::enable_if< Exp2, size_t >::type slot_index() const
+            {
+                return m_Elimination.randEngine() & (m_Elimination.collisions.capacity() - 1);
+            }
+
+            template <bool Exp2 = collision_array::c_bExp2>
+            typename std::enable_if< !Exp2, size_t >::type slot_index() const
+            {
+                return m_Elimination.randEngine() % m_Elimination.collisions.capacity();
+            }
+
         public:
             elimination_backoff()
             {
@@ -377,11 +390,11 @@ namespace cds { namespace intrusive {
             bool backoff( operation_desc& op, Stat& stat )
             {
                 elimination_backoff_type bkoff;
-                op.nStatus.store( op_busy, atomics::memory_order_relaxed );
+                op.nStatus.store( op_busy, atomics::memory_order_release );
 
                 elimination_rec * myRec = cds::algo::elimination::init_record( op );
 
-                collision_array_record& slot = m_Elimination.collisions[m_Elimination.randEngine() % m_Elimination.collisions.capacity()];
+                collision_array_record& slot = m_Elimination.collisions[ slot_index() ];
                 {
                     slot.lock.lock();
                     elimination_rec * himRec = slot.pRec;
@@ -394,9 +407,9 @@ namespace cds { namespace intrusive {
                         else
                             op.pVal = himOp->pVal;
                         slot.pRec = nullptr;
+                        himOp->nStatus.store( op_collided, atomics::memory_order_release );
                         slot.lock.unlock();
 
-                        himOp->nStatus.store( op_collided, atomics::memory_order_release );
                         cds::algo::elimination::clear_record();
                         stat.onActiveCollision( op.idOp );
                         return true;
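
Note: two details above are worth spelling out. Moving the op_collided store before slot.lock.unlock() closes a real window: once the slot is unlocked, the peer may already be re-using its record, so the late store raced with it. And slot_index() exploits the now-constexpr c_bExp2 from cds/opt/buffer.h (patched below) to replace the modulo with a bit mask when the collision array capacity is a power of two; randEngine becomes mutable so the const slot_index() can still advance it. A standalone sketch of that compile-time dispatch (illustrative names, not the stack's code):

    #include <cstddef>
    #include <random>
    #include <type_traits>

    template <bool Exp2, typename Engine>
    typename std::enable_if< Exp2, std::size_t >::type
    pick_slot( Engine& eng, std::size_t capacity )
    {
        return eng() & ( capacity - 1 );    // capacity == 2^k: mask, no division
    }

    template <bool Exp2, typename Engine>
    typename std::enable_if< !Exp2, std::size_t >::type
    pick_slot( Engine& eng, std::size_t capacity )
    {
        return eng() % capacity;            // general case
    }

    // usage: std::minstd_rand r;  std::size_t i = pick_slot<true>( r, 8 );
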
diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h
index d82d10cd..c4336d7a 100644
--- a/cds/memory/michael/allocator.h
+++ b/cds/memory/michael/allocator.h
@@ -60,12 +60,17 @@ namespace michael {
         /// Allocates memory block of \p nSize bytes (\p malloc wrapper)
         static void * alloc( size_t nSize )
         {
-            return ::malloc( nSize );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            void * p = ::malloc( nSize );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            return p;
         }
         /// Returning memory block to the system (\p free wrapper)
         static void free( void * p )
         {
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             ::free( p );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
         }
     };
 
@@ -1221,8 +1226,10 @@ namespace michael {
                 newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
                 assert( oldAnchor.avail < pDesc->nCapacity );
 
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                 pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                 newAnchor.tag += 1;
 
                 if ( oldActive.credits() == 0 ) {
@@ -1303,8 +1310,10 @@ namespace michael {
                 newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
                 assert( oldAnchor.avail < pDesc->nCapacity );
 
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                 pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                 ++newAnchor.tag;
             } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
 
@@ -1341,11 +1350,13 @@ namespace michael {
             byte * pEnd = pDesc->pSB + pDesc->nCapacity * pDesc->nBlockSize;
             unsigned int nNext = 0;
             const unsigned int nBlockSize = pDesc->nBlockSize;
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             for ( byte * p = pDesc->pSB; p < pEnd; p += nBlockSize ) {
                 reinterpret_cast<block_header *>( p )->set( pDesc, 0 );
                 reinterpret_cast<free_block_header *>( p )->nNextFree = ++nNext;
             }
             reinterpret_cast<free_block_header *>( pEnd - nBlockSize )->nNextFree = 0;
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
 
             active_tag newActive;
             newActive.set( pDesc, ( (pDesc->nCapacity - 1 < active_tag::c_nMaxCredits) ? pDesc->nCapacity - 1 : active_tag::c_nMaxCredits ) - 1 );

diff --git a/cds/opt/buffer.h b/cds/opt/buffer.h
index b081f93e..9dca22ff 100644
--- a/cds/opt/buffer.h
+++ b/cds/opt/buffer.h
@@ -52,8 +52,8 @@ namespace cds { namespace opt {
         {
         public:
             typedef T   value_type  ;   ///< value type
-            static const size_t c_nCapacity = Capacity ;    ///< Capacity
-            static const bool c_bExp2 = Exp2;               ///< \p Exp2 flag
+            static CDS_CONSTEXPR const size_t c_nCapacity = Capacity ;  ///< Capacity
+            static CDS_CONSTEXPR const bool c_bExp2 = Exp2;             ///< \p Exp2 flag
 
             /// Rebind buffer for other template parameters
             template <typename Q, size_t Capacity2 = c_nCapacity, bool Exp22 = c_bExp2>

diff --git a/tests/unit/pqueue/push_pop.cpp b/tests/unit/pqueue/push_pop.cpp
index d53e8596..7a1f47cf 100644
--- a/tests/unit/pqueue/push_pop.cpp
+++ b/tests/unit/pqueue/push_pop.cpp
@@ -137,14 +137,14 @@ namespace pqueue {
             }
         };
 
-        size_t  m_nPusherCount;
+        atomics::atomic<size_t> m_nPusherCount;
         void end_pusher()
         {
-            --m_nPusherCount;
+            m_nPusherCount.fetch_sub( 1, atomics::memory_order_relaxed );
         }
         bool pushing() const
         {
-            return m_nPusherCount != 0;
+            return m_nPusherCount.load( atomics::memory_order_relaxed ) != 0;
         }
 
     protected:
@@ -179,7 +179,7 @@ namespace pqueue {
 
         pool.add( new Popper( pool, testQueue ), s_nPopThreadCount );
 
-        m_nPusherCount = s_nPushThreadCount;
+        m_nPusherCount.store( s_nPushThreadCount, atomics::memory_order_release );
         CPPUNIT_MSG( " push thread count=" << s_nPushThreadCount << " pop thread count=" << s_nPopThreadCount
             << ", item count=" << nThreadItemCount * s_nPushThreadCount << " ..." );
         pool.run();
-- 
2.34.1
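
Postscript on the test change: the pusher counter is demoted from a plain size_t to an atomic because pusher threads decrement it while popper threads poll it. Relaxed ordering is enough for the decrements and the polls, since the counter only gates loop exit and publishes no data; the release store before the pool starts mirrors the test code. A standalone sketch (illustrative names):

    #include <atomic>
    #include <cstddef>

    std::atomic<std::size_t> pushers_running { 0 };

    void start( std::size_t nPushers )
    {
        // set once before the worker threads are spawned
        pushers_running.store( nPushers, std::memory_order_release );
    }

    void end_pusher()
    {
        pushers_running.fetch_sub( 1, std::memory_order_relaxed );
    }

    bool pushing()
    {
        return pushers_running.load( std::memory_order_relaxed ) != 0;
    }
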