X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=src%2Fdhp.cpp;h=27014fc84b2946d31a6eb511a09937f36d240c9f;hb=c77d4e592d1b528896b0378378a2afc3d80c8328;hp=462de952549eeb029938fd910ed63ced9e2b9c2b;hpb=c599f1b83462dccfe023884a7b4fd05fd11b85a0;p=libcds.git

diff --git a/src/dhp.cpp b/src/dhp.cpp
index 462de952..27014fc8 100644
--- a/src/dhp.cpp
+++ b/src/dhp.cpp
@@ -99,7 +99,7 @@ namespace cds { namespace gc { namespace dhp {
         else {
             // allocate new block
             gb = new( s_alloc_memory( sizeof( guard_block ) + sizeof( guard ) * defaults::c_extended_guard_block_size )) guard_block;
-            new ( gb->first() ) guard[defaults::c_extended_guard_block_size];
+            new ( gb->first()) guard[defaults::c_extended_guard_block_size];

             CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
         }
@@ -118,7 +118,7 @@ namespace cds { namespace gc { namespace dhp {

     CDS_EXPORT_API retired_allocator::~retired_allocator()
     {
-        while ( retired_block* rb = static_cast<retired_block*>( free_list_.get() ) ) {
+        while ( retired_block* rb = static_cast<retired_block*>( free_list_.get()) ) {
             rb->~retired_block();
             s_free_memory( rb );
         }
@@ -134,7 +134,7 @@ namespace cds { namespace gc { namespace dhp {
             // allocate new block
             rb = new( s_alloc_memory( sizeof( retired_block ) + sizeof( retired_ptr ) * retired_block::c_capacity )) retired_block;
             new ( rb->first()) retired_ptr[retired_block::c_capacity];
-            CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ) );
+            CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
         }

         rb->next_ = nullptr;
@@ -149,6 +149,8 @@ namespace cds { namespace gc { namespace dhp {

             thread_record( guard* guards, size_t guard_count )
                 : thread_data( guards, guard_count )
+                , m_pNextNode( nullptr )
+                , m_idOwner( cds::OS::c_NullThreadId )
                 , m_bFree( false )
             {}
         };
@@ -191,28 +193,27 @@ namespace cds { namespace gc { namespace dhp {
     }

     CDS_EXPORT_API smr::smr( size_t nInitialHazardPtrCount )
-        : thread_list_( nullptr )
-        , initial_hazard_count_( nInitialHazardPtrCount < 4 ? 16 : nInitialHazardPtrCount )
+        : initial_hazard_count_( nInitialHazardPtrCount < 4 ? 16 : nInitialHazardPtrCount )
         , last_plist_size_( initial_hazard_count_ * 64 )
-    {}
+    {
+        thread_list_.store( nullptr, atomics::memory_order_release );
+    }

     CDS_EXPORT_API smr::~smr()
     {
         CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
         CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )

-        CDS_HPSTAT( statistics( s_postmortem_stat ) );
+        CDS_HPSTAT( statistics( s_postmortem_stat ));

         thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
-        thread_list_.store( nullptr, atomics::memory_order_relaxed );
+        thread_list_.store( nullptr, atomics::memory_order_release );

         thread_record* pNext = nullptr;
         for ( thread_record* hprec = pHead; hprec; hprec = pNext ) {
             assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
-                || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
-                || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
-            );
+                || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId );

             retired_array& retired = hprec->retired_;

@@ -249,7 +250,7 @@ namespace cds { namespace gc { namespace dhp {
         thread_data* rec = tls_;
         if ( rec ) {
             tls_ = nullptr;
-            instance().free_thread_data( static_cast<thread_record*>( rec ) );
+            instance().free_thread_data( static_cast<thread_record*>( rec ));
         }
     }

@@ -288,7 +289,7 @@ namespace cds { namespace gc { namespace dhp {
         char* mem = reinterpret_cast<char*>( s_alloc_memory( sizeof( thread_record ) + guard_array_size ));

         return new( mem ) thread_record(
-            reinterpret_cast<guard*>( mem + sizeof( thread_record ) ), initial_hazard_count_
+            reinterpret_cast<guard*>( mem + sizeof( thread_record )), initial_hazard_count_
         );
     }

@@ -306,24 +307,24 @@ namespace cds { namespace gc { namespace dhp {
         const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();

         // First try to reuse a free (non-active) DHP record
-        for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) ) {
+        for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_acquire )) {
             cds::OS::ThreadId thId = nullThreadId;
-            if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ) )
+            if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ))
                 continue;
             hprec->m_bFree.store( false, atomics::memory_order_release );
             break;
         }
-
+
         if ( !hprec ) {
             // No HP records available for reuse
             // Allocate and push a new HP record
             hprec = create_thread_data();
             hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );

-            thread_record* pOldHead = thread_list_.load( atomics::memory_order_relaxed );
+            thread_record* pOldHead = thread_list_.load( atomics::memory_order_acquire );
             do {
-                hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
-            } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ) );
+                hprec->m_pNextNode.store( pOldHead, atomics::memory_order_release );
+            } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ));
        }

         hprec->hazards_.init();
@@ -341,7 +342,7 @@ namespace cds { namespace gc { namespace dhp {
         scan( pRec );
         help_scan( pRec );

-        if ( pRec->retired_.empty() ) {
+        if ( pRec->retired_.empty()) {
             pRec->retired_.fini();
             pRec->m_bFree.store( true, std::memory_order_release );
         }
@@ -410,8 +411,12 @@ namespace cds { namespace gc { namespace dhp {

             if ( pNode->m_idOwner.load( std::memory_order_relaxed ) != cds::OS::c_NullThreadId ) {
                 copy_hazards( plist, pNode->hazards_.array_, pNode->hazards_.initial_capacity_ );

-                for ( guard_block* block = pNode->hazards_.extended_list_; block; block = block->next_ )
+                for ( guard_block* block = pNode->hazards_.extended_list_.load( atomics::memory_order_acquire );
+                    block;
+                    block = block->next_block_.load( atomics::memory_order_acquire ))
+                {
                     copy_hazards( plist, block->first(), defaults::c_extended_guard_block_size );
+                }
             }

             pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
@@ -422,7 +427,7 @@ namespace cds { namespace gc { namespace dhp {
         last_plist_size_.compare_exchange_weak( plist_size, plist.size(), std::memory_order_relaxed, std::memory_order_relaxed );

         // Sort plist to simplify search in
-        std::sort( plist.begin(), plist.end() );
+        std::sort( plist.begin(), plist.end());

         // Stage 2: Search plist
         size_t free_count = 0;
@@ -446,22 +451,27 @@ namespace cds { namespace gc { namespace dhp {
         CDS_HPSTAT( pRec->free_call_count_ += free_count );

         // If the count of freed elements is too small, increase retired array
-        if ( free_count < retired_count / 4 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last() )
+        if ( free_count < retired_count / 4 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last())
             pRec->retired_.extend();
     }

     CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
     {
-        assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id() );
+        assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());

         CDS_HPSTAT( ++pThis->help_scan_call_count_ );

         const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
         const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
-        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
         {
+            if ( hprec == static_cast<thread_record*>( pThis ))
+                continue;
+
             // If m_bFree == true then hprec->retired_ is empty - we don't need to see it
-            if ( hprec->m_bFree.load( atomics::memory_order_acquire ) ) {
-                assert( hprec->retired_.empty() );
+            if ( hprec->m_bFree.load( atomics::memory_order_acquire )) {
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+                assert( hprec->retired_.empty());
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                 continue;
             }

@@ -469,8 +479,8 @@ namespace cds { namespace gc { namespace dhp {
             // Several threads may work concurrently so we use atomic technique
             {
                 cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_relaxed );
-                if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
-                    if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+                if ( curOwner == nullThreadId ) {
+                    if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                         continue;
                 }
                 else
@@ -478,14 +488,14 @@ namespace cds { namespace gc { namespace dhp {
             }

             // We own the thread record successfully. Now, we can see whether it has retired pointers.
-            // If it has ones then we move to pThis that is private for current thread.
+            // If it has ones then we move them to pThis that is private for current thread.
             retired_array& src = hprec->retired_;
             retired_array& dest = pThis->retired_;

             for ( retired_block* block = src.list_head_; block; block = block->next_ ) {
                 retired_ptr* last = block == src.current_block_ ? src.current_cell_ : block->last();
                 for ( retired_ptr* p = block->first(); p != last; ++p ) {
-                    if ( !dest.push( *p ) )
+                    if ( !dest.push( *p ))
                         scan( pThis );
                 }

@@ -505,8 +515,9 @@ namespace cds { namespace gc { namespace dhp {
     {
         st.clear();
 #   ifdef CDS_ENABLE_HPSTAT
-        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
         {
+            CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
             ++st.thread_rec_count;
             st.guard_allocated += hprec->hazards_.alloc_guard_count_;
             st.guard_freed += hprec->hazards_.free_guard_count_;
@@ -516,10 +527,13 @@ namespace cds { namespace gc { namespace dhp {
             st.free_count += hprec->free_call_count_;
             st.scan_count += hprec->scan_call_count_;
             st.help_scan_count += hprec->help_scan_call_count_;
+            CDS_TSAN_ANNOTATE_IGNORE_READS_END;
         }

+        CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
         st.hp_block_count = hp_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
         st.retired_block_count = retired_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
+        CDS_TSAN_ANNOTATE_IGNORE_READS_END;
 #   endif
     }
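
The changes above tighten the DHP thread-record life cycle: thread_record now initializes m_pNextNode and m_idOwner in its constructor, thread_list_ is published and traversed with release/acquire ordering, help_scan() skips the caller's own record and no longer consults cds::OS::is_thread_alive(), and the statistics and assertion reads are wrapped in TSan ignore-reads annotations. As a rough illustration of the reuse-or-push pattern that the @@ -306,24 +307,24 hunk strengthens, the standalone sketch below (not libcds code; record, registry, attach, and the exact memory orders are illustrative) claims a free record by a CAS on its owner id and otherwise CAS-pushes a fully initialized record onto a lock-free singly linked list.

#include <atomic>
#include <thread>

// Standalone sketch of the "reuse or CAS-push" pattern shown in the
// @@ -306,24 +307,24 hunk above. All names here are illustrative, not libcds API.
using thread_id = std::thread::id;

struct record {
    std::atomic<record*>   next{ nullptr };        // initialized up front, like the patched thread_record ctor
    std::atomic<thread_id> owner{ thread_id() };   // a default-constructed id stands in for cds::OS::c_NullThreadId
    std::atomic<bool>      free_{ false };
};

class registry {
    std::atomic<record*> head_{ nullptr };

public:
    record* attach()
    {
        const thread_id self = std::this_thread::get_id();

        // 1. Try to reuse a released record: claim it with a CAS on the owner id.
        for ( record* r = head_.load( std::memory_order_acquire ); r; r = r->next.load( std::memory_order_acquire )) {
            thread_id expected{};   // "no owner"
            if ( r->owner.compare_exchange_strong( expected, self, std::memory_order_relaxed, std::memory_order_relaxed )) {
                r->free_.store( false, std::memory_order_release );
                return r;
            }
        }

        // 2. Nothing reusable: allocate a fully initialized record and push it.
        record* r = new record;
        r->owner.store( self, std::memory_order_relaxed );
        record* old_head = head_.load( std::memory_order_acquire );
        do {
            r->next.store( old_head, std::memory_order_release );
        } while ( !head_.compare_exchange_weak( old_head, r, std::memory_order_release, std::memory_order_acquire ));
        return r;
    }

    // Records are never unlinked; a detaching thread only releases ownership so the
    // record can be reused later, loosely analogous to smr::free_thread_data() above.
    void detach( record* r )
    {
        r->free_.store( true, std::memory_order_release );
        r->owner.store( thread_id(), std::memory_order_release );
    }
};

Publishing a new record only after its next and owner fields are set, via a release CAS that the acquire traversal synchronizes with, is what makes the constructor-side initialization added in the @@ -149,6 +149,8 hunk pay off: a concurrent scan() or help_scan() walking thread_list_ never observes a half-constructed node.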