X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=src%2Fhp_gc.cpp;h=eb667c2fec5eb9dc6e0bbfa0f0cbeebb106e9b7c;hb=88007bd60cc71a2d34e5d957b6a6c71850590e76;hp=4899dd4d748af930f680f02d470bf54af43e0e75;hpb=9b4289e2ef6efad125925b14ae6cdcc670ce4d67;p=libcds.git

diff --git a/src/hp_gc.cpp b/src/hp_gc.cpp
index 4899dd4d..eb667c2f 100644
--- a/src/hp_gc.cpp
+++ b/src/hp_gc.cpp
@@ -1,4 +1,32 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 
 /*
     File: hzp_gc.cpp
@@ -49,7 +77,7 @@ namespace cds { namespace gc {
             scan_type nScanType )
             : m_pListHead( nullptr )
-            ,m_bStatEnabled( true )
+            ,m_bStatEnabled( false )
             ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
             ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
             ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ?
                 nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
@@ -59,7 +87,7 @@ namespace cds { namespace gc {
         GarbageCollector::~GarbageCollector()
         {
             CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
-            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
+            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
 
             hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
             m_pListHead.store( nullptr, atomics::memory_order_relaxed );
@@ -68,13 +96,13 @@ namespace cds { namespace gc {
             for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                     || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
-                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
+                    || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
                     );
                 details::retired_vector& vect = hprec->m_arrRetired;
                 details::retired_vector::iterator itRetired = vect.begin();
                 details::retired_vector::iterator itRetiredEnd = vect.end();
                 while ( itRetired != itRetiredEnd ) {
-                    DeletePtr( *itRetired );
+                    itRetired->free();
                     ++itRetired;
                 }
                 vect.clear();
@@ -86,35 +114,29 @@ namespace cds { namespace gc {
 
         inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
         {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
             return new hplist_node( *this );
         }
 
         inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
         {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
             assert( pNode->m_arrRetired.size() == 0 );
             delete pNode;
         }
 
-        inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
-            p.free();
-        }
-
         details::hp_record * GarbageCollector::alloc_hp_record()
        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
 
             hplist_node * hprec;
             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
 
             // First try to reuse a retired (non-active) HP record
             for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                 cds::OS::ThreadId thId = nullThreadId;
-                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
+                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
                     continue;
                 hprec->m_bFree.store( false, atomics::memory_order_release );
                 return hprec;
@@ -123,10 +145,8 @@ namespace cds { namespace gc {
             // No HP records available for reuse
             // Allocate and push a new HP record
             hprec = NewHPRec();
-            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
-            hprec->m_bFree.store( false, atomics::memory_order_relaxed );
-
-            atomics::atomic_thread_fence( atomics::memory_order_release );
+            hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
+            hprec->m_bFree.store( false, atomics::memory_order_release );
 
             hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
             do {
@@ -139,10 +159,11 @@ namespace cds { namespace gc {
         void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
             assert( pRec != nullptr );
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
 
             pRec->clear();
             Scan( pRec );
+            HelpScan( pRec );
             hplist_node * pNode = static_cast<hplist_node *>( pRec );
             pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
         }
@@ -161,7 +182,7 @@ namespace cds { namespace gc {
 
         void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
 
             std::vector< void * > plist;
             plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
@@ -173,7 +194,8 @@ namespace cds { namespace gc {
 
             while ( pNode ) {
                 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
-                    void * hptr = pNode->m_hzp[i];
+                    pRec->sync();
+                    void * hptr = pNode->m_hzp[i].get();
                     if ( hptr )
                         plist.push_back( hptr );
                 }
@@ -181,7 +203,7 @@ namespace cds { namespace gc {
             }
 
             // Sort plist to simplify search in
-            std::sort( plist.begin(), plist.end() );
+            std::sort( plist.begin(), plist.end());
 
             // Stage 2: Search plist
             details::retired_vector& arrRetired = pRec->m_arrRetired;
@@ -189,25 +211,30 @@ namespace cds { namespace gc {
             details::retired_vector::iterator itRetired = arrRetired.begin();
             details::retired_vector::iterator itRetiredEnd = arrRetired.end();
             // arrRetired is not a std::vector!
-            // clear is just set up item counter to 0, the items is not destroying
+            // clear() is just set up item counter to 0, the items is not destroyed
             arrRetired.clear();
 
-            std::vector< void * >::iterator itBegin = plist.begin();
-            std::vector< void * >::iterator itEnd = plist.end();
-            while ( itRetired != itRetiredEnd ) {
-                if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
-                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
-                    arrRetired.push( *itRetired );
+            {
+                std::vector< void * >::iterator itBegin = plist.begin();
+                std::vector< void * >::iterator itEnd = plist.end();
+                size_t nDeferredCount = 0;
+                while ( itRetired != itRetiredEnd ) {
+                    if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
+                        arrRetired.push( *itRetired );
+                        ++nDeferredCount;
+                    }
+                    else
+                        itRetired->free();
+                    ++itRetired;
                 }
-                else
-                    DeletePtr( *itRetired );
-                ++itRetired;
+                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
+                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
             }
         }
 
         void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
 
             // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
             // It is correct if all retired pointers are ar least 2-byte aligned (LSB is zero).
@@ -217,8 +244,8 @@ namespace cds { namespace gc {
             // LSB is used for marking pointers that cannot be deleted yet
             details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
             details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
-            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
-                if ( reinterpret_cast<uintptr_t>(it->m_p) & 1 ) {
+            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
+                if ( it->m_n & 1 ) {
                     // found a pointer with LSB bit set - use classic_scan
                     classic_scan( pRec );
                     return;
@@ -228,62 +255,84 @@ namespace cds { namespace gc {
             // Sort retired pointer array
             std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
 
-            // Search guarded pointers in retired array
-
-            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
+            // Check double free
+            /*
+            {
+                auto it = itRetired;
+                auto itPrev = it;
+                while ( ++it != itRetiredEnd ) {
+                    if ( it->m_p == itPrev->m_p )
+                        throw std::runtime_error( "Double free" );
+                    itPrev = it;
+                }
+            }
+            */
 
-            while ( pNode ) {
-                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
-                    void * hptr = pNode->m_hzp[i];
-                    if ( hptr ) {
-                        details::retired_ptr dummyRetired;
-                        dummyRetired.m_p = hptr;
-                        details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
-                        if ( it != itRetiredEnd && it->m_p == hptr ) {
-                            // Mark retired pointer as guarded
-                            it->m_p = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(it->m_p ) | 1);
+            // Search guarded pointers in retired array
+            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
+
+            {
+                details::retired_ptr dummyRetired;
+                while ( pNode ) {
+                    if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
+                        for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
+                            pRec->sync();
+                            void * hptr = pNode->m_hzp[i].get();
+                            if ( hptr ) {
+                                dummyRetired.m_p = hptr;
+                                details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
+                                if ( it != itRetiredEnd && it->m_p == hptr ) {
+                                    // Mark retired pointer as guarded
+                                    it->m_n |= 1;
+                                }
+                            }
                         }
                     }
+                    pNode = pNode->m_pNextNode;
                 }
-                pNode = pNode->m_pNextNode;
             }
 
             // Move all marked pointers to head of array
-            details::retired_vector::iterator itInsert = itRetired;
-            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
-                if ( reinterpret_cast<uintptr_t>(it->m_p) & 1 ) {
-                    it->m_p = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(it->m_p ) & ~1);
-                    *itInsert = *it;
-                    ++itInsert;
-                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
-                }
-                else {
-                    // Retired pointer may be freed
-                    DeletePtr( *it );
+            {
+                auto itInsert = itRetired;
+                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
+                    if ( it->m_n & 1 ) {
+                        it->m_n &= ~1;
+                        if ( itInsert != it )
+                            *itInsert = *it;
+                        ++itInsert;
+                    }
+                    else {
+                        // Retired pointer may be freed
+                        it->free();
+                    }
                 }
+                const size_t nDeferred = itInsert - itRetired;
+                pRec->m_arrRetired.size( nDeferred );
+                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
+                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
             }
-            pRec->m_arrRetired.size( itInsert - itRetired );
         }
 
         void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
 
-            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id());
 
             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
 
                 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
-                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
+                if ( hprec->m_bFree.load(atomics::memory_order_acquire))
                     continue;
 
                 // Owns hprec if it is empty.
                 // Several threads may work concurrently so we use atomic technique only.
                 {
                     cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
-                    if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
+                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
                         if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                             continue;
                     }
@@ -300,19 +349,33 @@ namespace cds { namespace gc {
                 details::retired_vector& dest = pThis->m_arrRetired;
                 assert( !dest.isFull());
                 details::retired_vector::iterator itRetired = src.begin();
+
+                // TSan can issue a warning here:
+                //    read src.m_nSize in src.end()
+                //    write src.m_nSize in src.clear()
+                // This is false positive since we own hprec
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                 details::retired_vector::iterator itRetiredEnd = src.end();
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+
                 while ( itRetired != itRetiredEnd ) {
                     dest.push( *itRetired );
                     if ( dest.isFull()) {
-                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
+                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
                         Scan( pThis );
                     }
                     ++itRetired;
                 }
+
+                // TSan: write src.m_nSize, see a comment above
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 src.clear();
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
 
                 hprec->m_bFree.store(true, atomics::memory_order_release);
                 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
+
+                Scan( pThis );
             }
         }
 
@@ -333,7 +396,7 @@ namespace cds { namespace gc {
                 ++stat.nHPRecAllocated;
                 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
 
-                if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
+                if ( hprec->m_bFree.load(atomics::memory_order_relaxed)) {
                     // Free HP record
                     stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
                 }