X-Git-Url: http://plrg.eecs.uci.edu/git/?p=libcds.git;a=blobdiff_plain;f=src%2Fdhp.cpp;h=ac8ec62f56f3de94fb91a51fde63b31bae7c2527;hp=53a7bf08e79c3af3cebf42ef7fe5542e8fb0e84d;hb=bc277979f85299cd681e5a413d63f0c7f1d67beb;hpb=596f57eb98fbf4b3717c749dc640b77ce70169f7

diff --git a/src/dhp.cpp b/src/dhp.cpp
index 53a7bf08..ac8ec62f 100644
--- a/src/dhp.cpp
+++ b/src/dhp.cpp
@@ -76,6 +76,7 @@ namespace cds { namespace gc { namespace dhp {
             }
         };
 
+        stat s_postmortem_stat;
     } // namespace
 
     /*static*/ CDS_EXPORT_API smr* smr::instance_ = nullptr;
@@ -99,6 +100,8 @@ namespace cds { namespace gc { namespace dhp {
             // allocate new block
             gb = new( s_alloc_memory( sizeof( guard_block ) + sizeof( guard ) * defaults::c_extended_guard_block_size )) guard_block;
             new ( gb->first() ) guard[defaults::c_extended_guard_block_size];
+
+            CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
         }
 
         // links guards in the block
@@ -131,6 +134,7 @@ namespace cds { namespace gc { namespace dhp {
             // allocate new block
             rb = new( s_alloc_memory( sizeof( retired_block ) + sizeof( retired_ptr ) * retired_block::c_capacity )) retired_block;
             new ( rb->first()) retired_ptr[retired_block::c_capacity];
+            CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ) );
         }
 
         rb->next_ = nullptr;
@@ -145,6 +149,8 @@ namespace cds { namespace gc { namespace dhp {
 
         thread_record( guard* guards, size_t guard_count )
             : thread_data( guards, guard_count )
+            , m_pNextNode( nullptr )
+            , m_idOwner( cds::OS::c_NullThreadId )
             , m_bFree( false )
         {}
     };
@@ -187,37 +193,42 @@ namespace cds { namespace gc { namespace dhp {
     }
 
     CDS_EXPORT_API smr::smr( size_t nInitialHazardPtrCount )
-        : thread_list_( nullptr )
-        , initial_hazard_count_( nInitialHazardPtrCount < 4 ? 16 : nInitialHazardPtrCount )
+        : initial_hazard_count_( nInitialHazardPtrCount < 4 ? 16 : nInitialHazardPtrCount )
         , last_plist_size_( initial_hazard_count_ * 64 )
-    {}
+    {
+        thread_list_.store( nullptr, atomics::memory_order_release );
+    }
 
     CDS_EXPORT_API smr::~smr()
     {
         CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
         CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )
 
+        CDS_HPSTAT( statistics( s_postmortem_stat ) );
+
         thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
-        thread_list_.store( nullptr, atomics::memory_order_relaxed );
+        thread_list_.store( nullptr, atomics::memory_order_release );
 
         thread_record* pNext = nullptr;
         for ( thread_record* hprec = pHead; hprec; hprec = pNext ) {
             assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
-                || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
-                || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
-            );
+                || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId );
 
             retired_array& retired = hprec->retired_;
 
             // delete retired data
             for ( retired_block* block = retired.list_head_; block && block != retired.current_block_; block = block->next_ ) {
-                for ( retired_ptr* p = block->first(); p != block->last(); ++p )
+                for ( retired_ptr* p = block->first(); p != block->last(); ++p ) {
                     p->free();
+                    CDS_HPSTAT( ++s_postmortem_stat.free_count );
+                }
             }
             if ( retired.current_block_ ) {
-                for ( retired_ptr* p = retired.current_block_->first(); p != retired.current_cell_; ++p )
+                for ( retired_ptr* p = retired.current_block_->first(); p != retired.current_cell_; ++p ) {
                     p->free();
+                    CDS_HPSTAT( ++s_postmortem_stat.free_count );
+                }
             }
             hprec->retired_.fini();
             hprec->hazards_.clear();
@@ -296,7 +307,7 @@ namespace cds { namespace gc { namespace dhp {
         const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
 
         // First try to reuse a free (non-active) DHP record
-        for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) ) {
+        for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_acquire ) ) {
             cds::OS::ThreadId thId = nullThreadId;
             if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ) )
                 continue;
@@ -310,9 +321,9 @@ namespace cds { namespace gc { namespace dhp {
         hprec = create_thread_data();
         hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
 
-        thread_record* pOldHead = thread_list_.load( atomics::memory_order_relaxed );
+        thread_record* pOldHead = thread_list_.load( atomics::memory_order_acquire );
         do {
-            hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
+            hprec->m_pNextNode.store( pOldHead, atomics::memory_order_release );
         } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ) );
     }
 
@@ -372,7 +383,7 @@ namespace cds { namespace gc { namespace dhp {
 
             for ( retired_ptr* p = block->first(), *end = p + block_size; p != end; ++p ) {
                 if ( cds_unlikely( std::binary_search( hp_begin, hp_end, p->m_p )))
-                    stg.safe_push( p );
+                    stg.repush( p );
                 else {
                     p->free();
                     ++count;
@@ -388,6 +399,8 @@ namespace cds { namespace gc { namespace dhp {
     {
         thread_record* pRec = static_cast<thread_record*>( pThreadRec );
 
+        CDS_HPSTAT( ++pRec->scan_call_count_ );
+
         hp_vector plist;
         size_t plist_size = last_plist_size_.load( std::memory_order_relaxed );
         plist.reserve( plist_size );
@@ -398,8 +411,12 @@ namespace cds { namespace gc { namespace dhp {
             if ( pNode->m_idOwner.load( std::memory_order_relaxed ) != cds::OS::c_NullThreadId ) {
                 copy_hazards( plist, pNode->hazards_.array_, pNode->hazards_.initial_capacity_ );
 
-                for ( guard_block* block = pNode->hazards_.extended_list_; block; block = block->next_ )
+                for ( guard_block* block = pNode->hazards_.extended_list_.load( atomics::memory_order_acquire );
+                    block;
+                    block = block->next_block_.load( atomics::memory_order_acquire ) )
+                {
                     copy_hazards( plist, block->first(), defaults::c_extended_guard_block_size );
+                }
             }
 
             pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
@@ -414,6 +431,7 @@ namespace cds { namespace gc { namespace dhp {
 
         // Stage 2: Search plist
         size_t free_count = 0;
+        size_t retired_count = 0;
         retired_block* last_block = pRec->retired_.current_block_;
         retired_ptr*   last_block_cell = pRec->retired_.current_cell_;
@@ -424,28 +442,36 @@ namespace cds { namespace gc { namespace dhp {
             bool const end_block = block == last_block;
             size_t const size = end_block ? last_block_cell - block->first() : retired_block::c_capacity;
 
+            retired_count += retired_block::c_capacity;
             free_count += retire_data( plist, pRec->retired_, block, size );
 
             if ( end_block )
                 break;
         }
+        CDS_HPSTAT( pRec->free_call_count_ += free_count );
 
         // If the count of freed elements is too small, increase retired array
-        if ( free_count == 0 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last() )
+        if ( free_count < retired_count / 4 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last() )
             pRec->retired_.extend();
     }
 
     CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
     {
         assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id() );
+        CDS_HPSTAT( ++pThis->help_scan_call_count_ );
 
         const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
         const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
         for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
         {
+            if ( hprec == static_cast<thread_record*>( pThis ))
+                continue;
+
             // If m_bFree == true then hprec->retired_ is empty - we don't need to see it
             if ( hprec->m_bFree.load( atomics::memory_order_acquire ) ) {
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                 assert( hprec->retired_.empty() );
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                 continue;
             }
 
@@ -453,7 +479,7 @@ namespace cds { namespace gc { namespace dhp {
             // Several threads may work concurrently so we use atomic technique
             {
                 cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_relaxed );
-                if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
+                if ( curOwner == nullThreadId ) {
                     if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
                         continue;
                 }
@@ -462,7 +488,7 @@ namespace cds { namespace gc { namespace dhp {
             }
 
             // We own the thread record successfully. Now, we can see whether it has retired pointers.
-            // If it has ones then we move to pThis that is private for current thread.
+            // If it has ones then we move them to pThis that is private for current thread.
             retired_array& src = hprec->retired_;
             retired_array& dest = pThis->retired_;
@@ -485,4 +511,37 @@ namespace cds { namespace gc { namespace dhp {
         scan( pThis );
     }
 
+    CDS_EXPORT_API void smr::statistics( stat& st )
+    {
+        st.clear();
+#   ifdef CDS_ENABLE_HPSTAT
+        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+        {
+            CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+            ++st.thread_rec_count;
+            st.guard_allocated      += hprec->hazards_.alloc_guard_count_;
+            st.guard_freed          += hprec->hazards_.free_guard_count_;
+            st.hp_extend_count      += hprec->hazards_.extend_call_count_;
+            st.retired_count        += hprec->retired_.retire_call_count_;
+            st.retired_extend_count += hprec->retired_.extend_call_count_;
+            st.free_count           += hprec->free_call_count_;
+            st.scan_count           += hprec->scan_call_count_;
+            st.help_scan_count      += hprec->help_scan_call_count_;
+            CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+        }
+
+        CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+        st.hp_block_count      = hp_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
+        st.retired_block_count = retired_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
+        CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+#   endif
+    }
+
 }}} // namespace cds::gc::dhp
+
+CDS_EXPORT_API /*static*/ cds::gc::DHP::stat const& cds::gc::DHP::postmortem_statistics()
+{
+    return cds::gc::dhp::s_postmortem_stat;
+}
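
Besides the statistics plumbing, the commit strengthens the memory ordering around thread_list_: a new thread_record is now linked in with release stores and the list is traversed with acquire loads. A minimal standalone sketch of that release/acquire publication pattern, under illustrative names (node, head, publish are not libcds identifiers):

// Sketch of the publication pattern used by smr::attach_thread() above:
// a node is linked into a lock-free list with release stores so that a
// reader traversing with acquire loads sees the node fully constructed.
#include <atomic>

struct node {
    int payload = 0;
    std::atomic<node*> next{ nullptr };
};

std::atomic<node*> head{ nullptr };

void publish( node* n )
{
    node* old = head.load( std::memory_order_acquire );
    do {
        // release: everything written to *n happens-before the CAS below
        n->next.store( old, std::memory_order_release );
    } while ( !head.compare_exchange_weak( old, n,
                  std::memory_order_release, std::memory_order_acquire ));
}

int read_all()
{
    int sum = 0;
    // acquire loads pair with the release stores in publish()
    for ( node* p = head.load( std::memory_order_acquire ); p;
          p = p->next.load( std::memory_order_acquire ))
        sum += p->payload;
    return sum;
}

With relaxed ordering on m_pNextNode, a thread walking the list in scan() or help_scan() could observe a freshly linked record before its fields were visible; the acquire/release pairing rules that out.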
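The commit also snapshots the per-thread CDS_HPSTAT counters into s_postmortem_stat inside smr::~smr() and exposes them through the new cds::gc::DHP::postmortem_statistics(). A minimal sketch of reading that snapshot, assuming libcds is built with CDS_ENABLE_HPSTAT defined (without it the counters stay zero); the main() scaffolding is illustrative, not part of the commit:

// Sketch: read DHP post-mortem statistics after the GC is destroyed.
#include <cds/init.h>
#include <cds/gc/dhp.h>
#include <iostream>

int main()
{
    cds::Initialize();
    {
        cds::gc::DHP dhpGC;                        // constructs the dhp::smr singleton
        cds::threading::Manager::attachThread();

        // ... DHP-based containers would be used here ...

        cds::threading::Manager::detachThread();
    }   // ~DHP runs smr::~smr(), which fills s_postmortem_stat via statistics()

    cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();
    std::cout << "thread records:    " << st.thread_rec_count
              << "\nguards allocated:  " << st.guard_allocated
              << "\nretired pointers:  " << st.retired_count
              << "\nfreed pointers:    " << st.free_count
              << "\nscan() calls:      " << st.scan_count
              << "\nhelp_scan() calls: " << st.help_scan_count << '\n';

    cds::Terminate();
    return 0;
}

Since the snapshot is taken in the destructor, postmortem_statistics() is only meaningful after the cds::gc::DHP object has gone out of scope.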