template <typename Q>
node_type * New( unsigned int nHeight, Q const& v )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
unsigned char * pMem = alloc_space( nHeight );
node_type * p = new( pMem )
node_type( nHeight, nHeight > 1 ? reinterpret_cast<node_tower_item *>(pMem + c_nNodeSize) : nullptr, v );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return p;
}
template <typename... Args>
node_type * New( unsigned int nHeight, Args&&... args )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
unsigned char * pMem = alloc_space( nHeight );
node_type * p = new( pMem )
node_type( nHeight, nHeight > 1 ? reinterpret_cast<node_tower_item *>(pMem + c_nNodeSize) : nullptr,
std::forward<Args>(args)... );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return p;
}
unsigned int nHeight = p->height();
node_allocator_type().destroy( p );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
free_space( reinterpret_cast<unsigned char *>(p), nHeight );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
};
template <typename... S>
value_type * New( S const&... src )
{
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- }
- value_type * pv = Construct( allocator_type::allocate(1), src... );
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- }
- return pv;
-# else
return Construct( allocator_type::allocate(1), src... );
-# endif
}
/// Analogue of <tt>operator new T( std::forward<Args>(args)... )</tt> (move semantics)
template <typename... Args>
value_type * MoveNew( Args&&... args )
{
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- }
- value_type * pv = MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- }
- return pv;
-# else
return MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
-# endif
}
/// Analogue of operator new T[\p nCount ]
value_type * NewArray( size_t nCount )
{
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- }
-# endif
value_type * p = allocator_type::allocate( nCount );
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- }
-# endif
for ( size_t i = 0; i < nCount; ++i )
Construct( p + i );
return p;
template <typename S>
value_type * NewArray( size_t nCount, S const& src )
{
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- }
-# endif
value_type * p = allocator_type::allocate( nCount );
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- }
-# endif
for ( size_t i = 0; i < nCount; ++i )
Construct( p + i, src );
return p;
/// Analogue of operator delete
void Delete( value_type * p )
{
- // TSan false positive possible
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
allocator_type::destroy( p );
allocator_type::deallocate( p, 1 );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
/// Analogue of operator delete []
void Delete( value_type * p, size_t nCount )
{
- // TSan false positive possible
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
for ( size_t i = 0; i < nCount; ++i )
allocator_type::destroy( p + i );
allocator_type::deallocate( p, nCount );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
# if CDS_COMPILER == CDS_COMPILER_INTEL
template <typename... S>
value_type * Construct( void * p, S const&... src )
{
- // TSan false positive possible
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
value_type * pv = new( p ) value_type( src... );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return pv;
}
template <typename... Args>
value_type * MoveConstruct( void * p, Args&&... args )
{
- // TSan false positive possible
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
value_type * pv = new( p ) value_type( std::forward<Args>(args)... );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return pv;
}
size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *);
typedef typename allocator_type::template rebind< void * >::other void_allocator;
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- }
-# endif
- void * p = void_allocator().allocate( nPtrSize );
-# if CDS_THREAD_SANITIZER_ENABLED
- if ( c_bStdAllocator ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- }
-# endif
- return p;
+ return void_allocator().allocate( nPtrSize );
}
//@endcond
};
template <typename T>
T protect( atomics::atomic<T> const& toGuard )
{
- T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_acquire);
T pRet;
do {
pRet = assign( pCur );
template <typename T, class Func>
T protect( atomics::atomic<T> const& toGuard, Func f )
{
- T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_acquire);
T pRet;
do {
pRet = pCur;
{
T pRet;
do {
- pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) );
- } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+ pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
return pRet;
}
{
T pRet;
do {
- assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
- } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
return pRet;
}
template <typename T>
T protect( atomics::atomic<T> const& toGuard )
{
- T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_acquire);
T pRet;
do {
pRet = assign( pCur );
template <typename T, class Func>
T protect( atomics::atomic<T> const& toGuard, Func f )
{
- T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_acquire);
T pRet;
do {
pRet = pCur;
{
T pRet;
do {
- pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) );
- } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+ pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
return pRet;
}
{
T pRet;
do {
- assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
- } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
return pRet;
}
{
splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
if ( p->is_dummy() ) {
- CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( p );
dummy_node_disposer<gc, typename traits::allocator>()( p );
}
else {
- CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( v );
native_disposer()( v );
}
}
struct clean_disposer {
void operator()( value_type * p )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
michael_list::node_cleaner<gc, node_type, memory_model>()( node_traits::to_node_ptr( p ) );
disposer()( p );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
};
//@endcond
// Mark the node (logical deleting)
marked_node_ptr next(pos.pNext, 0);
- if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+ if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) {
// physical deletion may be performed by search function if it detects that a node is logically deleted (marked)
// CAS may be successful here or in other thread that searching something
marked_node_ptr cur(pos.pCur);
pPrev = &refHead;
pNext = nullptr;
- pCur = pPrev->load(memory_model::memory_order_relaxed);
+ pCur = pPrev->load(memory_model::memory_order_acquire);
pos.guards.assign( position::guard_current_item, node_traits::to_value_ptr( pCur.ptr() ) );
- if ( pPrev->load(memory_model::memory_order_acquire) != pCur.ptr() )
+ if ( pPrev->load(memory_model::memory_order_relaxed) != pCur.ptr() )
goto try_again;
while ( true ) {
return false;
}
- pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed);
- CDS_TSAN_ANNOTATE_HAPPENS_AFTER( pNext.ptr() );
+ pNext = pCur->m_pNext.load(memory_model::memory_order_acquire);
pos.guards.assign( position::guard_next_item, node_traits::to_value_ptr( pNext.ptr() ));
- if ( pCur->m_pNext.load(memory_model::memory_order_acquire).all() != pNext.all() ) {
+ if ( pCur->m_pNext.load(memory_model::memory_order_relaxed).all() != pNext.all() ) {
bkoff();
goto try_again;
}
- if ( pPrev->load(memory_model::memory_order_acquire).all() != pCur.ptr() ) {
+ if ( pPrev->load(memory_model::memory_order_relaxed).all() != pCur.ptr() ) {
bkoff();
goto try_again;
}
return insert_at( pHead, *node_traits::to_value_ptr( pNode ) );
}
- bool insert_at( node_type * pHead, value_type& val, bool bLock = true )
+ bool insert_at( node_type * pHead, value_type& val )
{
+ rcu_lock l;
+ return insert_at_locked( pHead, val );
+ }
+
+ bool insert_at_locked( node_type * pHead, value_type& val )
+ {
+ // Precondition: the caller must hold the RCU lock
+ assert( gc::is_locked() );
+
link_checker::is_empty( node_traits::to_node_ptr( val ) );
position pos;
key_comparator cmp;
- rcu_lock l( bLock );
while ( true ) {
search( pHead, val, pos );
{
}
}
- iterator insert_at_( node_type * pHead, value_type& val, bool bLock = true )
+ iterator insert_at_( node_type * pHead, value_type& val )
{
- rcu_lock l( bLock );
- if ( insert_at( pHead, val, false ))
+ rcu_lock l;
+ if ( insert_at_locked( pHead, val ))
return iterator( node_traits::to_node_ptr( val ));
return end();
}
template <typename Func>
- std::pair<iterator, bool> ensure_at_( node_type * pHead, value_type& val, Func func, bool bLock = true )
+ std::pair<iterator, bool> ensure_at_( node_type * pHead, value_type& val, Func func )
+ {
+ rcu_lock l;
+ return ensure_at_locked( pHead, val, func );
+ }
+
+ template <typename Func>
+ std::pair<iterator, bool> ensure_at_locked( node_type * pHead, value_type& val, Func func )
{
+ // Precondition: the caller must hold the RCU lock
+ assert( gc::is_locked() );
+
position pos;
key_comparator cmp;
- rcu_lock l( bLock );
while ( true ) {
search( pHead, val, pos );
{
}
template <typename Func>
- std::pair<bool, bool> ensure_at( node_type * pHead, value_type& val, Func func, bool bLock = true )
+ std::pair<bool, bool> ensure_at( node_type * pHead, value_type& val, Func func )
{
- rcu_lock l( bLock );
- std::pair<iterator, bool> ret = ensure_at_( pHead, val, func, false );
+ rcu_lock l;
+ std::pair<iterator, bool> ret = ensure_at_locked( pHead, val, func );
return std::make_pair( ret.first != end(), ret.second );
}
}
template <typename Q, typename Compare, typename Func>
- bool find_at( node_type * pHead, Q& val, Compare cmp, Func f, bool bLock = true ) const
+ bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) const
{
position pos;
- rcu_lock l( bLock );
+ rcu_lock l;
search( pHead, val, pos, cmp );
if ( pos.pCur != &m_Tail ) {
std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock );
dummy_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- // TSan false positive: hash is read-only, will be ordered when we insert a node
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
if ( m_List.insert_at( pHead, val )) {
inc_item_count();
dummy_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- // TSan false positive: hash is read-only, will be ordered when we insert a node
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
if ( m_List.insert_at( pHead, val, f )) {
inc_item_count();
dummy_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- // TSan false positive: hash is read-only, will be ordered when we insert a node
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
std::pair<bool, bool> bRet = m_List.ensure_at( pHead, val, func );
if ( bRet.first && bRet.second ) {
/// Allocates memory block of \p nSize bytes (\p malloc wrapper)
static void * alloc( size_t nSize )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
void * p = ::malloc( nSize );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return p;
}
/// Returning memory block to the system (\p free wrapper)
static void free( void * p )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
::free( p );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
};
newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
- CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
- CDS_TSAN_ANNOTATE_IGNORE_READS_END;
newAnchor.tag += 1;
if ( oldActive.credits() == 0 ) {
newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
- CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
- CDS_TSAN_ANNOTATE_IGNORE_READS_END;
++newAnchor.tag;
} while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
byte * pEnd = pDesc->pSB + pDesc->nCapacity * pDesc->nBlockSize;
unsigned int nNext = 0;
const unsigned int nBlockSize = pDesc->nBlockSize;
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
for ( byte * p = pDesc->pSB; p < pEnd; p += nBlockSize ) {
reinterpret_cast<block_header *>( p )->set( pDesc, 0 );
reinterpret_cast<free_block_header *>( p )->nNextFree = ++nNext;
}
reinterpret_cast<free_block_header *>( pEnd - nBlockSize )->nNextFree = 0;
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
active_tag newActive;
newActive.set( pDesc, ( (pDesc->nCapacity - 1 < active_tag::c_nMaxCredits) ? pDesc->nCapacity - 1 : active_tag::c_nMaxCredits ) - 1 );
typedef ThreadGC thread_gc;
typedef typename thread_gc::rcu_tag rcu_tag;
- bool m_bLocked;
public:
- scoped_lock(bool bLock = true)
- : m_bLocked( bLock )
+ scoped_lock()
{
- if ( bLock )
- thread_gc::access_lock();
+ thread_gc::access_lock();
}
~scoped_lock()
{
- if ( m_bLocked )
- thread_gc::access_unlock();
+ thread_gc::access_unlock();
}
};
//@endcond
epoch_retired_ptr p;
while ( m_Buffer.pop( p )) {
if ( p.m_nEpoch <= nEpoch ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
else {
push_buffer( p );
if ( !bPushed || m_Buffer.size() >= capacity() ) {
synchronize();
if ( !bPushed ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
ep.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
return true;
}
{
synchronize();
if ( p.m_p ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+ // TSan ignores atomic_thread_fence in synchronize()
+ //CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( p.m_p );
p.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
}
while ( itFirst != itLast ) {
retired_ptr p( *itFirst );
++itFirst;
- if ( p.m_p )
+ if ( p.m_p ) {
+ // TSan ignores atomic_thread_fence in synchronize()
+ //CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( p.m_p );
p.free();
+ }
}
}
}
if ( !bPushed || m_Buffer.size() >= capacity() ) {
synchronize();
if ( !bPushed ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
return true;
}
epoch_retired_ptr p;
while ( m_Buffer.pop( p )) {
if ( p.m_nEpoch <= nEpoch ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
else {
push_buffer( p );
if ( !bPushed || m_Buffer.size() >= capacity() ) {
synchronize();
if ( !bPushed ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
ep.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
return true;
}
if ( !bPushed || m_Buffer.size() >= capacity() ) {
synchronize();
if ( !bPushed ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
return true;
}
epoch_retired_ptr p;
while ( pBuf->pop( p ) ) {
if ( p.m_nEpoch <= nCurEpoch ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p.free();
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
else {
pBuf->push( p );
ThreadPool::~ThreadPool()
{
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
-
delete m_pBarrierStart;
delete m_pBarrierDone;
for ( size_t i = 0; i < m_arrThreads.size(); ++i )
delete m_arrThreads[i];
m_arrThreads.resize( 0 );
-
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
void ThreadPool::add( TestThread * pThread, size_t nCount )
T * allocate( size_t n, void const * pHint = nullptr )
{
internal_node_counter::onAlloc();
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
T * p = base_class::allocate( n, pHint );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return p;
}
while ( true ) {
typename Queue::value_type * p = m_Queue.pop();
if ( p ) {
- CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p->nConsumer = m_nThreadNo;
++m_nPopped;
if ( p->nWriterNo < nTotalWriters )
m_WriterData[ p->nWriterNo ].push_back( p->nNo );
else
++m_nBadWriter;
- CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
else {
++m_nPopEmpty;
while ( !(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) {
typename Stack::value_type * p = m_Stack.pop();
if ( p ) {
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
p->nConsumer = m_nThreadNo;
++m_nPopCount;
if ( p->nNo < sizeof(m_arrPop)/sizeof(m_arrPop[0]) )
++m_arrPop[p->nNo];
else
++m_nDirtyPop;
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
else
++m_nPopEmpty;
--- /dev/null
+
+
+# Possible Boost race in the test framework; it does not affect libcds
+race:CppUnitMini::ThreadPool::~ThreadPool