From: khizmax
Date: Fri, 14 Nov 2014 11:53:03 +0000 (+0300)
Subject: rename GC sources
X-Git-Tag: v2.0.0~95
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=880b263082f2ec708ae821981b6ceb14fb4bb9b3;p=libcds.git

rename GC sources
---

diff --git a/projects/Win/vc12/cds.vcxproj b/projects/Win/vc12/cds.vcxproj
index cf1ef383..c78c9cc7 100644
--- a/projects/Win/vc12/cds.vcxproj
+++ b/projects/Win/vc12/cds.vcxproj
@@ -619,11 +619,11 @@
  [XML <ClCompile> entries lost in extraction; the hunk replaces the entries for src/hzp_gc.cpp and src/ptb_gc.cpp with entries for src/hp_gc.cpp and src/dhp_gc.cpp]
@@ -818,7 +818,6 @@
  [XML entry lost in extraction; the hunk removes one item, presumably the src/hzp_const.h entry]
@@ -965,6 +964,7 @@
  [XML entry lost in extraction; the hunk adds one item, presumably the src/hp_const.h entry]
diff --git a/projects/Win/vc12/cds.vcxproj.filters b/projects/Win/vc12/cds.vcxproj.filters
index bee42d54..5ce38054 100644
--- a/projects/Win/vc12/cds.vcxproj.filters
+++ b/projects/Win/vc12/cds.vcxproj.filters
@@ -159,18 +159,12 @@
  [XML <ClCompile> wrappers lost in extraction; only the <Filter>Source Files</Filter> children survived. The hunk removes the "Source Files" filter entries for the old hzp_gc.cpp and ptb_gc.cpp]
@@ -186,11 +180,14 @@
  [the hunk adds "Source Files" filter entries for the new hp_gc.cpp and dhp_gc.cpp and removes one old entry]
@@ -1190,5 +1187,8 @@
  [the hunk adds one "Source Files" filter entry after the "Header Files\cds\gc\hp" group, presumably for src/hp_const.h]
\ No newline at end of file
diff --git a/projects/source.libcds.mk b/projects/source.libcds.mk
index b823c3af..ba2b1b1f 100644
--- a/projects/source.libcds.mk
+++ b/projects/source.libcds.mk
@@ -1,6 +1,7 @@
-CDS_SOURCES=src/hzp_gc.cpp \
+CDS_SOURCES= \
+    src/hp_gc.cpp \
     src/init.cpp \
-    src/ptb_gc.cpp \
+    src/dhp_gc.cpp \
     src/urcu_gp.cpp \
     src/urcu_sh.cpp \
     src/michael_heap.cpp \
diff --git a/src/dhp_gc.cpp b/src/dhp_gc.cpp
new file mode 100644
index 00000000..b17d0e61
--- /dev/null
+++ b/src/dhp_gc.cpp
@@ -0,0 +1,307 @@
+//$$CDS-header$$
+
+// Pass The Buck (PTB) Memory manager implementation
+
+#include <algorithm>    // std::fill
+#include <functional>   // std::hash
+
+#include <cds/gc/ptb/ptb.h>
+#include <cds/algo/int_algo.h>  // beans::ceil2
+
+namespace cds { namespace gc { namespace ptb {
+
+    namespace details {
+
+        class liberate_set {
+            typedef retired_ptr_node *  item_type;
+            typedef cds::details::Allocator<item_type> allocator_type;
+
+            size_t const    m_nBucketCount;
+            item_type *     m_Buckets;
+
+            item_type& bucket( retired_ptr_node& node )
+            {
+                return bucket( node.m_ptr.m_p );
+            }
+            item_type& bucket( guard_data::guarded_ptr p )
+            {
+                return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
+            }
+
+        public:
+            liberate_set( size_t nBucketCount )
+                : m_nBucketCount( nBucketCount )
+            {
+                assert( nBucketCount > 0 );
+                assert( (nBucketCount & (nBucketCount - 1)) == 0 );
+
+                m_Buckets = allocator_type().NewArray( nBucketCount );
+                std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
+            }
+
+            ~liberate_set()
+            {
+                allocator_type().Delete( m_Buckets, m_nBucketCount );
+            }
+
+            void insert( retired_ptr_node& node )
+            {
+                node.m_pNext = nullptr;
+
+                item_type& refBucket = bucket( node );
+                if ( refBucket ) {
+                    item_type p = refBucket;
+                    do {
+                        if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
+                            assert( node.m_pNextFree == nullptr );
+
+                            node.m_pNextFree = p->m_pNextFree;
+                            p->m_pNextFree = &node;
+                            return;
+                        }
+                        p = p->m_pNext;
+                    } while ( p );
+
+                    node.m_pNext = refBucket;
+                }
+                refBucket = &node;
+            }
+
+            item_type erase( guard_data::guarded_ptr ptr )
+            {
+                item_type& refBucket = bucket( ptr );
+                item_type p = refBucket;
+                item_type pPrev = nullptr;
+
+                while ( p ) {
+                    if ( p->m_ptr.m_p == ptr ) {
+                        if ( pPrev )
+                            pPrev->m_pNext = p->m_pNext;
+                        else
+                            refBucket = p->m_pNext;
+                        p->m_pNext = nullptr;
+                        return p;
+                    }
+                    pPrev = p;
+                    p = p->m_pNext;
+                }
+
+                return nullptr;
+            }
+
+            typedef std::pair<item_type, item_type> list_range;
+
+            list_range free_all()
+            {
+                item_type pTail = nullptr;
+                list_range ret = std::make_pair( pTail, pTail );
+
+                item_type const * pEndBucket = m_Buckets + m_nBucketCount;
+                for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
+                    item_type pBucket = *ppBucket;
+                    if ( pBucket ) {
+                        if ( !ret.first )
+                            ret.first = pBucket;
+                        else
+                            pTail->m_pNextFree = pBucket;
+
+                        pTail = pBucket;
+                        for (;;) {
+                            item_type pNext = pTail->m_pNext;
+                            pTail->m_ptr.free();
+                            pTail->m_pNext = nullptr;
+
+                            while ( pTail->m_pNextFree ) {
+                                pTail = pTail->m_pNextFree;
+                                pTail->m_ptr.free();
+                                pTail->m_pNext = nullptr;
+                            }
+
+                            if ( pNext )
+                                pTail = pTail->m_pNextFree = pNext;
+                            else
+                                break;
+                        }
+                    }
+                }
+
+                if ( pTail )
+                    pTail->m_pNextFree = nullptr;
+                ret.second = pTail;
+                return ret;
+            }
+        };
+    }
+
+    GarbageCollector * GarbageCollector::m_pManager = nullptr;
+
+    void CDS_STDCALL GarbageCollector::Construct(
+        size_t nLiberateThreshold
+        , size_t nInitialThreadGuardCount
+    )
+    {
+        if ( !m_pManager ) {
+            m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount );
+        }
+    }
+
+    void CDS_STDCALL GarbageCollector::Destruct()
+    {
+        if ( m_pManager ) {
+            delete m_pManager;
+            m_pManager = nullptr;
+        }
+    }
+
+    GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
+        : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
+        , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
+        //, m_nInLiberate(0)
+    {
+    }
+
+    GarbageCollector::~GarbageCollector()
+    {
+        liberate();
+
+#if 0
+        details::retired_ptr_node * pHead = nullptr;
+        details::retired_ptr_node * pTail = nullptr;
+
+        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) {
+            details::guard_data::handoff_ptr h = pGuard->pHandOff;
+            pGuard->pHandOff = nullptr;
+            while ( h ) {
+                details::guard_data::handoff_ptr pNext = h->m_pNextFree;
+                if ( h->m_ptr.m_p )
+                    h->m_ptr.free();
+                if ( !pHead )
+                    pTail = pHead = h;
+                else
+                    pTail = pTail->m_pNextFree = h;
+                h = pNext;
+            }
+        }
+        if ( pHead )
+            m_RetiredAllocator.free_range( pHead, pTail );
+#endif
+    }
+
+    void GarbageCollector::liberate()
+    {
+        details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
+        if ( retiredList.first ) {
+
+            size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
+            details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ));
+
+            // Get list of retired pointers
+            details::retired_ptr_node * pHead = retiredList.first;
+            while ( pHead ) {
+                details::retired_ptr_node * pNext = pHead->m_pNext;
+                pHead->m_pNextFree = nullptr;
+                set.insert( *pHead );
+                pHead = pNext;
+            }
+
+            // Liberate cycle
+            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
+            {
+                // get guarded pointer
+                details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
+
+                if ( valGuarded ) {
+                    details::retired_ptr_node * pRetired = set.erase( valGuarded );
+                    if ( pRetired ) {
+                        // The retired pointer is being guarded.
+                        // pRetired is the head of the list of retired pointers with equal m_ptr.m_p field;
+                        // the list is linked through the m_pNextFree field
+
+                        do {
+                            details::retired_ptr_node * pNext = pRetired->m_pNextFree;
+                            m_RetiredBuffer.push( *pRetired );
+                            pRetired = pNext;
+                        } while ( pRetired );
+                    }
+                }
+            }
+
+            // Free all retired pointers
+            details::liberate_set::list_range range = set.free_all();
+
+            m_RetiredAllocator.inc_epoch();
+
+            if ( range.first ) {
+                assert( range.second != nullptr );
+                m_RetiredAllocator.free_range( range.first, range.second );
+            }
+            else {
+                // the liberate cycle did not free any retired pointer - double the liberate threshold
+                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
+            }
+        }
+    }
+
+#if 0
+    void GarbageCollector::liberate( details::liberate_set& set )
+    {
+        details::guard_data::handoff_ptr const nullHandOff = nullptr;
+
+        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
+        {
+            // get guarded pointer
+            details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
+            details::guard_data::handoff_ptr h;
+
+            if ( valGuarded ) {
+                details::retired_ptr_node * pRetired = set.erase( valGuarded );
+                if ( pRetired ) {
+                    // The retired pointer is being guarded.
+                    // pRetired is the head of the list of retired pointers with equal m_ptr.m_p field;
+                    // the list is linked through the m_pNextFree field
+
+                    // Now, try to set retired node pRetired as a hand-off node for the guard
+                    cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
+                    if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) {
+                        if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
+                            h = nullHandOff ; //nullptr;
+                            details::retired_ptr_node * pTail = pGuard->pHandOff;
+                            while ( pTail->m_pNextFree )
+                                pTail = pTail->m_pNextFree;
+                            pTail->m_pNextFree = pRetired;
+                        }
+                        else {
+                            // swap h and pGuard->pHandOff
+                            h = pGuard->pHandOff;
+                            pGuard->pHandOff = pRetired;
+                        }
+                    }
+                    else
+                        h = pRetired;
+                }
+                else {
+                    cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
+                    h = pGuard->pHandOff;
+                    if ( h ) {
+                        if ( h->m_ptr.m_p != valGuarded )
+                            pGuard->pHandOff = nullHandOff;
+                        else
+                            h = nullHandOff;
+                    }
+                }
+            }
+            else {
+                cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
+                h = pGuard->pHandOff;
+                pGuard->pHandOff = nullHandOff;
+            }
+
+            // h is the head of a list linked through the m_pNextFree field
+            if ( h ) {
+                set.insert( *h );
+            }
+        }
+    }
+#endif
+}}} // namespace cds::gc::ptb
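The liberate_set above keys retired nodes by the pointer value being reclaimed: nodes with equal m_ptr.m_p hang off one another through m_pNextFree, while distinct pointers that land in the same bucket are chained through m_pNext. A minimal self-contained sketch of that two-level chaining (hypothetical Node type, not part of the commit):

    #include <cstddef>
    #include <functional>

    struct Node {
        void * key      = nullptr;  // plays the role of m_ptr.m_p
        Node * next     = nullptr;  // bucket chain: distinct keys (m_pNext)
        Node * next_dup = nullptr;  // duplicate chain: same key (m_pNextFree)
    };

    // Insert into a power-of-two bucket array, chaining duplicates separately
    // so that erasing a key can hand back every node for that key at once.
    void insert( Node * buckets[], size_t nBuckets, Node & node )
    {
        Node *& head = buckets[ std::hash<void *>()( node.key ) & (nBuckets - 1) ];
        for ( Node * p = head; p; p = p->next ) {
            if ( p->key == node.key ) {     // same key: splice into duplicate chain
                node.next_dup = p->next_dup;
                p->next_dup = &node;
                return;
            }
        }
        node.next = head;                   // new key: prepend to bucket chain
        head = &node;
    }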
diff --git a/src/hp_const.h b/src/hp_const.h
new file mode 100644
index 00000000..71359d95
--- /dev/null
+++ b/src/hp_const.h
@@ -0,0 +1,30 @@
+//$$CDS-header$$
+
+#ifndef __CDSIMPL_HP_CONST_H
+#define __CDSIMPL_HP_CONST_H
+
+/*
+    File: hp_const.h
+
+    Michael's Hazard Pointer reclamation schema global constants
+    Gidenstam's reclamation schema global constants
+
+    Editions:
+        2008.03.10  Maxim.Khiszinsky  Created
+*/
+
+namespace cds { namespace gc {
+
+    //---------------------------------------------------------------
+    // Hazard Pointers reclamation schema constants
+    namespace hzp {
+        // Max number of threads expected
+        static const size_t c_nMaxThreadCount = 100;
+
+        // Number of Hazard Pointers per thread
+        static const size_t c_nHazardPointerPerThread = 8;
+    } // namespace hzp
+
+} /* namespace gc */ } /* namespace cds */
+
+#endif // #ifndef __CDSIMPL_HP_CONST_H
diff --git a/src/hp_gc.cpp b/src/hp_gc.cpp
new file mode 100644
index 00000000..5bfbee4f
--- /dev/null
+++ b/src/hp_gc.cpp
@@ -0,0 +1,364 @@
+//$$CDS-header$$
+
+/*
+    File: hp_gc.cpp
+
+    Hazard Pointers memory reclamation strategy implementation
+
+    Editions:
+        2008.02.10  Maxim.Khiszinsky  Created
+*/
+
+#include <cds/gc/hzp/hzp.h>
+
+#include <algorithm>    // std::sort
+#include "hp_const.h"
+
+#define CDS_HAZARDPTR_STATISTIC( _x )   if ( m_bStatEnabled ) { _x; }
+
+namespace cds { namespace gc {
+    namespace hzp {
+
+        /// Max array size of retired pointers
+        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
+
+        GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
+
+        void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
+        {
+            if ( !m_pHZPManager ) {
+                m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
+            }
+        }
+
+        void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
+        {
+            if ( m_pHZPManager ) {
+                if ( bDetachAll )
+                    m_pHZPManager->detachAllThread();
+
+                delete m_pHZPManager;
+                m_pHZPManager = nullptr;
+            }
+        }
+
+        GarbageCollector::GarbageCollector(
+            size_t nHazardPtrCount,
+            size_t nMaxThreadCount,
+            size_t nMaxRetiredPtrCount,
+            scan_type nScanType
+        )
+            : m_pListHead( nullptr )
+            ,m_bStatEnabled( true )
+            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
+            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
+            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
+            ,m_nScanType( nScanType )
+        {}
+
+        GarbageCollector::~GarbageCollector()
+        {
+            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
+            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId(); )
+
+            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
+            m_pListHead.store( nullptr, atomics::memory_order_relaxed );
+
+            hplist_node * pNext = nullptr;
+            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
+                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
+                );
+                details::retired_vector& vect = hprec->m_arrRetired;
+                details::retired_vector::iterator itRetired = vect.begin();
+                details::retired_vector::iterator itRetiredEnd = vect.end();
+                while ( itRetired != itRetiredEnd ) {
+                    DeletePtr( *itRetired );
+                    ++itRetired;
+                }
+                vect.clear();
+                pNext = hprec->m_pNextNode;
+                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
+                DeleteHPRec( hprec );
+            }
+        }
+
+        inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
+            return new hplist_node( *this );
+        }
+
+        inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
+            assert( pNode->m_arrRetired.size() == 0 );
+            delete pNode;
+        }
+
+        inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
+            p.free();
+        }
+
+        details::HPRec * GarbageCollector::AllocateHPRec()
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
+
+            hplist_node * hprec;
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
+
+            // First try to reuse a retired (non-active) HP record
+            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
+                cds::OS::ThreadId thId = nullThreadId;
+                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
+                    continue;
+                hprec->m_bFree.store( false, atomics::memory_order_release );
+                return hprec;
+            }
+
+            // No HP records available for reuse
+            // Allocate and push a new HP record
+            hprec = NewHPRec();
+            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
+            hprec->m_bFree.store( false, atomics::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( atomics::memory_order_release );
+
+            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
+            do {
+                hprec->m_pNextNode = pOldHead;
+            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
+
+            return hprec;
+        }
+
+        void GarbageCollector::RetireHPRec( details::HPRec * pRec )
+        {
+            assert( pRec != nullptr );
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
+
+            pRec->clear();
+            Scan( pRec );
+            hplist_node * pNode = static_cast<hplist_node *>( pRec );
+            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
+        }
+
+        void GarbageCollector::detachAllThread()
+        {
+            hplist_node * pNext = nullptr;
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
+                pNext = hprec->m_pNextNode;
+                if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
+                    RetireHPRec( hprec );
+                }
+            }
+        }
+
+        void GarbageCollector::classic_scan( details::HPRec * pRec )
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
+
+            std::vector< void * > plist;
+            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
+            assert( plist.size() == 0 );
+
+            // Stage 1: Scan HP list and insert non-null values in plist
+
+            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
+
+            while ( pNode ) {
+                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
+                    void * hptr = pNode->m_hzp[i];
+                    if ( hptr )
+                        plist.push_back( hptr );
+                }
+                pNode = pNode->m_pNextNode;
+            }
+
+            // Sort plist to allow the binary search below
+            std::sort( plist.begin(), plist.end() );
+
+            // Stage 2: Search plist
+            details::retired_vector& arrRetired = pRec->m_arrRetired;
+
+            details::retired_vector::iterator itRetired = arrRetired.begin();
+            details::retired_vector::iterator itRetiredEnd = arrRetired.end();
+            // arrRetired is not a std::vector!
+            // clear() just resets the item counter to 0; the items are not destroyed
+            arrRetired.clear();
+
+            std::vector< void * >::iterator itBegin = plist.begin();
+            std::vector< void * >::iterator itEnd = plist.end();
+            while ( itRetired != itRetiredEnd ) {
+                if ( std::binary_search( itBegin, itEnd, itRetired->m_p ) ) {
+                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
+                    arrRetired.push( *itRetired );
+                }
+                else
+                    DeletePtr( *itRetired );
+                ++itRetired;
+            }
+        }
+
+        void GarbageCollector::inplace_scan( details::HPRec * pRec )
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
+
+            // The in-place scan algorithm uses the LSB of a retired pointer as a mark for internal purposes.
+            // This is correct only if all retired pointers are at least 2-byte aligned (LSB is zero).
+            // Otherwise we fall back to the classic scan algorithm.
+
+            // Check that all retired pointers have zero LSB
+            // LSB is used for marking pointers that cannot be deleted yet
+            details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
+            details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
+            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
+                if ( reinterpret_cast<uintptr_t>(it->m_p) & 1 ) {
+                    // found a pointer with the LSB set - use classic_scan
+                    classic_scan( pRec );
+                    return;
+                }
+            }
+
+            // Sort retired pointer array
+            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
+
+            // Search guarded pointers in retired array
+
+            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
+
+            while ( pNode ) {
+                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
+                    void * hptr = pNode->m_hzp[i];
+                    if ( hptr ) {
+                        details::retired_ptr dummyRetired;
+                        dummyRetired.m_p = hptr;
+                        details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
+                        if ( it != itRetiredEnd && it->m_p == hptr ) {
+                            // Mark retired pointer as guarded
+                            it->m_p = reinterpret_cast<void *>( reinterpret_cast<uintptr_t>(it->m_p) | 1 );
+                        }
+                    }
+                }
+                pNode = pNode->m_pNextNode;
+            }
+
+            // Move all marked pointers to the head of the array
+            details::retired_vector::iterator itInsert = itRetired;
+            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
+                if ( reinterpret_cast<uintptr_t>(it->m_p) & 1 ) {
+                    it->m_p = reinterpret_cast<void *>( reinterpret_cast<uintptr_t>(it->m_p) & ~uintptr_t(1) );
+                    *itInsert = *it;
+                    ++itInsert;
+                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
+                }
+                else {
+                    // Retired pointer may be freed
+                    DeletePtr( *it );
+                }
+            }
+            pRec->m_arrRetired.size( itInsert - itRetired );
+        }
+
+        void GarbageCollector::HelpScan( details::HPRec * pThis )
+        {
+            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
+
+            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+
+                // If m_bFree == true then hprec->m_arrRetired is empty - no need to scan it
+                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
+                    continue;
+
+                // Take ownership of hprec if it has no live owner.
+                // Several threads may work concurrently, so we rely on atomic operations only.
+                {
+                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
+                    if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
+                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
+                            continue;
+                    }
+                    else {
+                        curOwner = nullThreadId;
+                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
+                            continue;
+                    }
+                }
+
+                // We now own hprec. If it holds retired pointers,
+                // move them to pThis, which is private to the current thread.
+                details::retired_vector& src = hprec->m_arrRetired;
+                details::retired_vector& dest = pThis->m_arrRetired;
+                assert( !dest.isFull());
+                details::retired_vector::iterator itRetired = src.begin();
+                details::retired_vector::iterator itRetiredEnd = src.end();
+                while ( itRetired != itRetiredEnd ) {
+                    dest.push( *itRetired );
+                    if ( dest.isFull()) {
+                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
+                        Scan( pThis );
+                    }
+                    ++itRetired;
+                }
+                src.clear();
+
+                hprec->m_bFree.store(true, atomics::memory_order_release);
+                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
+            }
+        }
+
+        GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat ) const
+        {
+            stat.nHPCount            = m_nHazardPointerCount;
+            stat.nMaxThreadCount     = m_nMaxThreadCount;
+            stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
+            stat.nHPRecSize          = sizeof( hplist_node )
+                                        + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
+
+            stat.nHPRecAllocated =
+                stat.nHPRecUsed =
+                stat.nTotalRetiredPtrCount =
+                stat.nRetiredPtrInFreeHPRecs = 0;
+
+            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+                ++stat.nHPRecAllocated;
+                stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
+
+                if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
+                    // Free HP record
+                    stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
+                }
+                else {
+                    // Used HP record
+                    ++stat.nHPRecUsed;
+                }
+            }
+
+            // Events
+            stat.evcAllocHPRec   = m_Stat.m_AllocHPRec;
+            stat.evcRetireHPRec  = m_Stat.m_RetireHPRec;
+            stat.evcAllocNewHPRec = m_Stat.m_AllocNewHPRec;
+            stat.evcDeleteHPRec  = m_Stat.m_DeleteHPRec;
+
+            stat.evcScanCall     = m_Stat.m_ScanCallCount;
+            stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
+            stat.evcScanFromHelpScan = m_Stat.m_CallScanFromHelpScan;
+
+            stat.evcDeletedNode  = m_Stat.m_DeletedNode;
+            stat.evcDeferredNode = m_Stat.m_DeferredNode;
+
+            return stat;
+        }
+
+    } //namespace hzp
+}} // namespace cds::gc
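The renamed sources keep the original entry points. For orientation, a minimal hypothetical driver showing how the two collectors above are constructed and torn down; the header paths mirror the includes reconstructed above, while cds::Initialize/cds::Terminate and the inplace enumerator are assumptions about the surrounding libcds API, not part of this commit:

    // Sketch only - not taken from this commit.
    #include <cds/init.h>          // cds::Initialize / cds::Terminate (assumed)
    #include <cds/gc/hzp/hzp.h>    // Hazard Pointer GC implemented in hp_gc.cpp
    #include <cds/gc/ptb/ptb.h>    // Pass-the-Buck GC implemented in dhp_gc.cpp

    int main()
    {
        cds::Initialize();

        // Zeros select the defaults visible in the constructor above:
        // c_nHazardPointerPerThread (8), c_nMaxThreadCount (100), and
        // max(nMaxRetiredPtrCount, c_nMaxRetireNodeCount). 'inplace' is
        // assumed to be a scan_type enumerator matching inplace_scan().
        cds::gc::hzp::GarbageCollector::Construct( 0, 0, 0, cds::gc::hzp::inplace );

        // Zeros select the 1024-node liberate threshold and the
        // 8 initial guards per thread from the PTB constructor above.
        cds::gc::ptb::GarbageCollector::Construct( 0, 0 );

        // ... attach threads and use HP/PTB-based containers ...

        cds::gc::ptb::GarbageCollector::Destruct();
        cds::gc::hzp::GarbageCollector::Destruct( true );   // detach remaining threads
        cds::Terminate();
        return 0;
    }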
diff --git a/src/hzp_const.h b/src/hzp_const.h
deleted file mode 100644
index af10977e..00000000
--- a/src/hzp_const.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//$$CDS-header$$
-
-#ifndef __CDSIMPL_HZP_CONST_H
-#define __CDSIMPL_HZP_CONST_H
-
-/*
-    File: hzp_const.h
-
-    Michael's Hazard Pointer reclamation schema global constants
-    Gidenstam's reclamation schema global constants
-
-    Editions:
-        2008.03.10  Maxim.Khiszinsky  Created
-*/
-
-namespace cds { namespace gc {
-
-    //---------------------------------------------------------------
-    // Hazard Pointers reclamation schema constants
-    namespace hzp {
-        // Max number of threads expected
-        static const size_t c_nMaxThreadCount = 100;
-
-        // Number of Hazard Pointers per thread
-        static const size_t c_nHazardPointerPerThread = 8;
-    } // namespace hzp
-
-} /* namespace gc */ } /* namespace cds */
-
-#endif // #ifndef __CDSIMPL_HZP_CONST_H
diff --git a/src/hzp_gc.cpp b/src/hzp_gc.cpp
deleted file mode 100644
index a8bb004d..00000000
--- a/src/hzp_gc.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-//$$CDS-header$$
-
-/*
-    File: hzp_gc.cpp
-
-    Hazard Pointers memory reclamation strategy implementation
-
-    Editions:
-        2008.02.10  Maxim.Khiszinsky  Created
-*/
-
-#include <cds/gc/hzp/hzp.h>
-
-#include <algorithm>    // std::sort
-#include "hzp_const.h"
-
-#define CDS_HAZARDPTR_STATISTIC( _x )   if ( m_bStatEnabled ) { _x; }
-
-namespace cds { namespace gc {
-    namespace hzp {
-
-        /// Max array size of retired pointers
-        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
-
-        GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
-
-        void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
-        {
-            if ( !m_pHZPManager ) {
-                m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
-            }
-        }
-
-        void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
-        {
-            if ( m_pHZPManager ) {
-                if ( bDetachAll )
-                    m_pHZPManager->detachAllThread();
-
-                delete m_pHZPManager;
-                m_pHZPManager = nullptr;
-            }
-        }
-
-        GarbageCollector::GarbageCollector(
-            size_t nHazardPtrCount,
-            size_t nMaxThreadCount,
-            size_t nMaxRetiredPtrCount,
-            scan_type nScanType
-        )
-            : m_pListHead( nullptr )
-            ,m_bStatEnabled( true )
-            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
-            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
-            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
-            ,m_nScanType( nScanType )
-        {}
-
-        GarbageCollector::~GarbageCollector()
-        {
-            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
-            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId(); )
-
-            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
-            m_pListHead.store( nullptr, atomics::memory_order_relaxed );
-
-            hplist_node * pNext = nullptr;
-            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
-                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
-                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
-                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
-                );
-                details::retired_vector& vect = hprec->m_arrRetired;
-                details::retired_vector::iterator itRetired = vect.begin();
-                details::retired_vector::iterator itRetiredEnd = vect.end();
-                while ( itRetired != itRetiredEnd ) {
-                    DeletePtr( *itRetired );
-                    ++itRetired;
-                }
-                vect.clear();
-                pNext = hprec->m_pNextNode;
-                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
-                DeleteHPRec( hprec );
-            }
-        }
-
-        inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
-            return new hplist_node( *this );
-        }
-
-        inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
-            assert( pNode->m_arrRetired.size() == 0 );
-            delete pNode;
-        }
-
-        inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
-            p.free();
-        }
-
-        details::HPRec * GarbageCollector::AllocateHPRec()
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
-
-            hplist_node * hprec;
-            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
-
-            // First try to reuse a retired (non-active) HP record
-            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
-                cds::OS::ThreadId thId = nullThreadId;
-                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
-                    continue;
-                hprec->m_bFree.store( false, atomics::memory_order_release );
-                return hprec;
-            }
-
-            // No HP records available for reuse
-            // Allocate and push a new HP record
-            hprec = NewHPRec();
-            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
-            hprec->m_bFree.store( false, atomics::memory_order_relaxed );
-
-            atomics::atomic_thread_fence( atomics::memory_order_release );
-
-            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
-            do {
-                hprec->m_pNextNode = pOldHead;
-            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
-
-            return hprec;
-        }
-
-        void GarbageCollector::RetireHPRec( details::HPRec * pRec )
-        {
-            assert( pRec != nullptr );
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
-
-            pRec->clear();
-            Scan( pRec );
-            hplist_node * pNode = static_cast<hplist_node *>( pRec );
-            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
-        }
-
-        void GarbageCollector::detachAllThread()
-        {
-            hplist_node * pNext = nullptr;
-            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
-                pNext = hprec->m_pNextNode;
-                if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
-                    RetireHPRec( hprec );
-                }
-            }
-        }
-
-        void GarbageCollector::classic_scan( details::HPRec * pRec )
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
-
-            std::vector< void * > plist;
-            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
-            assert( plist.size() == 0 );
-
-            // Stage 1: Scan HP list and insert non-null values in plist
-
-            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
-
-            while ( pNode ) {
-                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
-                    void * hptr = pNode->m_hzp[i];
-                    if ( hptr )
-                        plist.push_back( hptr );
-                }
-                pNode = pNode->m_pNextNode;
-            }
-
-            // Sort plist to allow the binary search below
-            std::sort( plist.begin(), plist.end() );
-
-            // Stage 2: Search plist
-            details::retired_vector& arrRetired = pRec->m_arrRetired;
-
-            details::retired_vector::iterator itRetired = arrRetired.begin();
-            details::retired_vector::iterator itRetiredEnd = arrRetired.end();
-            // arrRetired is not a std::vector!
-            // clear() just resets the item counter to 0; the items are not destroyed
-            arrRetired.clear();
-
-            std::vector< void * >::iterator itBegin = plist.begin();
-            std::vector< void * >::iterator itEnd = plist.end();
-            while ( itRetired != itRetiredEnd ) {
-                if ( std::binary_search( itBegin, itEnd, itRetired->m_p ) ) {
-                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
-                    arrRetired.push( *itRetired );
-                }
-                else
-                    DeletePtr( *itRetired );
-                ++itRetired;
-            }
-        }
-
-        void GarbageCollector::inplace_scan( details::HPRec * pRec )
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
-
-            // The in-place scan algorithm uses the LSB of a retired pointer as a mark for internal purposes.
-            // This is correct only if all retired pointers are at least 2-byte aligned (LSB is zero).
-            // Otherwise we fall back to the classic scan algorithm.
-
-            // Check that all retired pointers have zero LSB
-            // LSB is used for marking pointers that cannot be deleted yet
-            details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
-            details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
-            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
-                if ( reinterpret_cast<uintptr_t>(it->m_p) & 1 ) {
-                    // found a pointer with the LSB set - use classic_scan
-                    classic_scan( pRec );
-                    return;
-                }
-            }
-
-            // Sort retired pointer array
-            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
-
-            // Search guarded pointers in retired array
-
-            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
-
-            while ( pNode ) {
-                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
-                    void * hptr = pNode->m_hzp[i];
-                    if ( hptr ) {
-                        details::retired_ptr dummyRetired;
-                        dummyRetired.m_p = hptr;
-                        details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
-                        if ( it != itRetiredEnd && it->m_p == hptr ) {
-                            // Mark retired pointer as guarded
-                            it->m_p = reinterpret_cast<void *>( reinterpret_cast<uintptr_t>(it->m_p) | 1 );
-                        }
-                    }
-                }
-                pNode = pNode->m_pNextNode;
-            }
-
-            // Move all marked pointers to the head of the array
-            details::retired_vector::iterator itInsert = itRetired;
-            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
-                if ( reinterpret_cast<uintptr_t>(it->m_p) & 1 ) {
-                    it->m_p = reinterpret_cast<void *>( reinterpret_cast<uintptr_t>(it->m_p) & ~uintptr_t(1) );
-                    *itInsert = *it;
-                    ++itInsert;
-                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
-                }
-                else {
-                    // Retired pointer may be freed
-                    DeletePtr( *it );
-                }
-            }
-            pRec->m_arrRetired.size( itInsert - itRetired );
-        }
-
-        void GarbageCollector::HelpScan( details::HPRec * pThis )
-        {
-            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
-
-            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
-
-            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
-            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
-
-                // If m_bFree == true then hprec->m_arrRetired is empty - no need to scan it
-                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
-                    continue;
-
-                // Take ownership of hprec if it has no live owner.
-                // Several threads may work concurrently, so we rely on atomic operations only.
-                {
-                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
-                    if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
-                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
-                            continue;
-                    }
-                    else {
-                        curOwner = nullThreadId;
-                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
-                            continue;
-                    }
-                }
-
-                // We now own hprec. If it holds retired pointers,
-                // move them to pThis, which is private to the current thread.
-                details::retired_vector& src = hprec->m_arrRetired;
-                details::retired_vector& dest = pThis->m_arrRetired;
-                assert( !dest.isFull());
-                details::retired_vector::iterator itRetired = src.begin();
-                details::retired_vector::iterator itRetiredEnd = src.end();
-                while ( itRetired != itRetiredEnd ) {
-                    dest.push( *itRetired );
-                    if ( dest.isFull()) {
-                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
-                        Scan( pThis );
-                    }
-                    ++itRetired;
-                }
-                src.clear();
-
-                hprec->m_bFree.store(true, atomics::memory_order_release);
-                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
-            }
-        }
-
-        GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat ) const
-        {
-            stat.nHPCount            = m_nHazardPointerCount;
-            stat.nMaxThreadCount     = m_nMaxThreadCount;
-            stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
-            stat.nHPRecSize          = sizeof( hplist_node )
-                                        + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
-
-            stat.nHPRecAllocated =
-                stat.nHPRecUsed =
-                stat.nTotalRetiredPtrCount =
-                stat.nRetiredPtrInFreeHPRecs = 0;
-
-            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
-                ++stat.nHPRecAllocated;
-                stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
-
-                if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
-                    // Free HP record
-                    stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
-                }
-                else {
-                    // Used HP record
-                    ++stat.nHPRecUsed;
-                }
-            }
-
-            // Events
-            stat.evcAllocHPRec   = m_Stat.m_AllocHPRec;
-            stat.evcRetireHPRec  = m_Stat.m_RetireHPRec;
-            stat.evcAllocNewHPRec = m_Stat.m_AllocNewHPRec;
-            stat.evcDeleteHPRec  = m_Stat.m_DeleteHPRec;
-
-            stat.evcScanCall     = m_Stat.m_ScanCallCount;
-            stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
-            stat.evcScanFromHelpScan = m_Stat.m_CallScanFromHelpScan;
-
-            stat.evcDeletedNode  = m_Stat.m_DeletedNode;
-            stat.evcDeferredNode = m_Stat.m_DeferredNode;
-
-            return stat;
-        }
-
-    } //namespace hzp
-}} // namespace cds::gc
diff --git a/src/ptb_gc.cpp b/src/ptb_gc.cpp
deleted file mode 100644
index b17d0e61..00000000
--- a/src/ptb_gc.cpp
+++ /dev/null
@@ -1,307 +0,0 @@
-//$$CDS-header$$
-
-// Pass The Buck (PTB) Memory manager implementation
-
-#include <algorithm>    // std::fill
-#include <functional>   // std::hash
-
-#include <cds/gc/ptb/ptb.h>
-#include <cds/algo/int_algo.h>  // beans::ceil2
-
-namespace cds { namespace gc { namespace ptb {
-
-    namespace details {
-
-        class liberate_set {
-            typedef retired_ptr_node *  item_type;
-            typedef cds::details::Allocator<item_type> allocator_type;
-
-            size_t const    m_nBucketCount;
-            item_type *     m_Buckets;
-
-            item_type& bucket( retired_ptr_node& node )
-            {
-                return bucket( node.m_ptr.m_p );
-            }
-            item_type& bucket( guard_data::guarded_ptr p )
-            {
-                return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
-            }
-
-        public:
-            liberate_set( size_t nBucketCount )
-                : m_nBucketCount( nBucketCount )
-            {
-                assert( nBucketCount > 0 );
-                assert( (nBucketCount & (nBucketCount - 1)) == 0 );
-
-                m_Buckets = allocator_type().NewArray( nBucketCount );
-                std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
-            }
-
-            ~liberate_set()
-            {
-                allocator_type().Delete( m_Buckets, m_nBucketCount );
-            }
-
-            void insert( retired_ptr_node& node )
-            {
-                node.m_pNext = nullptr;
-
-                item_type& refBucket = bucket( node );
-                if ( refBucket ) {
-                    item_type p = refBucket;
-                    do {
-                        if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
-                            assert( node.m_pNextFree == nullptr );
-
-                            node.m_pNextFree = p->m_pNextFree;
-                            p->m_pNextFree = &node;
-                            return;
-                        }
-                        p = p->m_pNext;
-                    } while ( p );
-
-                    node.m_pNext = refBucket;
-                }
-                refBucket = &node;
-            }
-
-            item_type erase( guard_data::guarded_ptr ptr )
-            {
-                item_type& refBucket = bucket( ptr );
-                item_type p = refBucket;
-                item_type pPrev = nullptr;
-
-                while ( p ) {
-                    if ( p->m_ptr.m_p == ptr ) {
-                        if ( pPrev )
-                            pPrev->m_pNext = p->m_pNext;
-                        else
-                            refBucket = p->m_pNext;
-                        p->m_pNext = nullptr;
-                        return p;
-                    }
-                    pPrev = p;
-                    p = p->m_pNext;
-                }
-
-                return nullptr;
-            }
-
-            typedef std::pair<item_type, item_type> list_range;
-
-            list_range free_all()
-            {
-                item_type pTail = nullptr;
-                list_range ret = std::make_pair( pTail, pTail );
-
-                item_type const * pEndBucket = m_Buckets + m_nBucketCount;
-                for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
-                    item_type pBucket = *ppBucket;
-                    if ( pBucket ) {
-                        if ( !ret.first )
-                            ret.first = pBucket;
-                        else
-                            pTail->m_pNextFree = pBucket;
-
-                        pTail = pBucket;
-                        for (;;) {
-                            item_type pNext = pTail->m_pNext;
-                            pTail->m_ptr.free();
-                            pTail->m_pNext = nullptr;
-
-                            while ( pTail->m_pNextFree ) {
-                                pTail = pTail->m_pNextFree;
-                                pTail->m_ptr.free();
-                                pTail->m_pNext = nullptr;
-                            }
-
-                            if ( pNext )
-                                pTail = pTail->m_pNextFree = pNext;
-                            else
-                                break;
-                        }
-                    }
-                }
-
-                if ( pTail )
-                    pTail->m_pNextFree = nullptr;
-                ret.second = pTail;
-                return ret;
-            }
-        };
-    }
-
-    GarbageCollector * GarbageCollector::m_pManager = nullptr;
-
-    void CDS_STDCALL GarbageCollector::Construct(
-        size_t nLiberateThreshold
-        , size_t nInitialThreadGuardCount
-    )
-    {
-        if ( !m_pManager ) {
-            m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount );
-        }
-    }
-
-    void CDS_STDCALL GarbageCollector::Destruct()
-    {
-        if ( m_pManager ) {
-            delete m_pManager;
-            m_pManager = nullptr;
-        }
-    }
-
-    GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
-        : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
-        , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
-        //, m_nInLiberate(0)
-    {
-    }
-
-    GarbageCollector::~GarbageCollector()
-    {
-        liberate();
-
-#if 0
-        details::retired_ptr_node * pHead = nullptr;
-        details::retired_ptr_node * pTail = nullptr;
-
-        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) {
-            details::guard_data::handoff_ptr h = pGuard->pHandOff;
-            pGuard->pHandOff = nullptr;
-            while ( h ) {
-                details::guard_data::handoff_ptr pNext = h->m_pNextFree;
-                if ( h->m_ptr.m_p )
-                    h->m_ptr.free();
-                if ( !pHead )
-                    pTail = pHead = h;
-                else
-                    pTail = pTail->m_pNextFree = h;
-                h = pNext;
-            }
-        }
-        if ( pHead )
-            m_RetiredAllocator.free_range( pHead, pTail );
-#endif
-    }
-
-    void GarbageCollector::liberate()
-    {
-        details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
-        if ( retiredList.first ) {
-
-            size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
-            details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ));
-
-            // Get list of retired pointers
-            details::retired_ptr_node * pHead = retiredList.first;
-            while ( pHead ) {
-                details::retired_ptr_node * pNext = pHead->m_pNext;
-                pHead->m_pNextFree = nullptr;
-                set.insert( *pHead );
-                pHead = pNext;
-            }
-
-            // Liberate cycle
-            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
-            {
-                // get guarded pointer
-                details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
-
-                if ( valGuarded ) {
-                    details::retired_ptr_node * pRetired = set.erase( valGuarded );
-                    if ( pRetired ) {
-                        // The retired pointer is being guarded.
-                        // pRetired is the head of the list of retired pointers with equal m_ptr.m_p field;
-                        // the list is linked through the m_pNextFree field
-
-                        do {
-                            details::retired_ptr_node * pNext = pRetired->m_pNextFree;
-                            m_RetiredBuffer.push( *pRetired );
-                            pRetired = pNext;
-                        } while ( pRetired );
-                    }
-                }
-            }
-
-            // Free all retired pointers
-            details::liberate_set::list_range range = set.free_all();
-
-            m_RetiredAllocator.inc_epoch();
-
-            if ( range.first ) {
-                assert( range.second != nullptr );
-                m_RetiredAllocator.free_range( range.first, range.second );
-            }
-            else {
-                // the liberate cycle did not free any retired pointer - double the liberate threshold
-                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
-            }
-        }
-    }
-
-#if 0
-    void GarbageCollector::liberate( details::liberate_set& set )
-    {
-        details::guard_data::handoff_ptr const nullHandOff = nullptr;
-
-        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
-        {
-            // get guarded pointer
-            details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
-            details::guard_data::handoff_ptr h;
-
-            if ( valGuarded ) {
-                details::retired_ptr_node * pRetired = set.erase( valGuarded );
-                if ( pRetired ) {
-                    // The retired pointer is being guarded.
-                    // pRetired is the head of the list of retired pointers with equal m_ptr.m_p field;
-                    // the list is linked through the m_pNextFree field
-
-                    // Now, try to set retired node pRetired as a hand-off node for the guard
-                    cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
-                    if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) {
-                        if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
-                            h = nullHandOff ; //nullptr;
-                            details::retired_ptr_node * pTail = pGuard->pHandOff;
-                            while ( pTail->m_pNextFree )
-                                pTail = pTail->m_pNextFree;
-                            pTail->m_pNextFree = pRetired;
-                        }
-                        else {
-                            // swap h and pGuard->pHandOff
-                            h = pGuard->pHandOff;
-                            pGuard->pHandOff = pRetired;
-                        }
-                    }
-                    else
-                        h = pRetired;
-                }
-                else {
-                    cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
-                    h = pGuard->pHandOff;
-                    if ( h ) {
-                        if ( h->m_ptr.m_p != valGuarded )
-                            pGuard->pHandOff = nullHandOff;
-                        else
-                            h = nullHandOff;
-                    }
-                }
-            }
-            else {
-                cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
-                h = pGuard->pHandOff;
-                pGuard->pHandOff = nullHandOff;
-            }
-
-            // h is the head of a list linked through the m_pNextFree field
-            if ( h ) {
-                set.insert( *h );
-            }
-        }
-    }
-#endif
-}}} // namespace cds::gc::ptb
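The inplace_scan implementation in both the old and new sources depends on retired pointers being at least 2-byte aligned, so the low bit is free to carry a "guarded" mark. The same trick in isolation, as a standalone sketch (assumes suitably aligned pointers; not taken from the commit):

    #include <cstdint>

    // Tag a pointer by setting its least significant bit; valid only when
    // the pointee's alignment guarantees that bit is otherwise zero.
    inline void * mark( void * p )
    {
        return reinterpret_cast<void *>( reinterpret_cast<uintptr_t>( p ) | 1 );
    }
    inline bool is_marked( void * p )
    {
        return ( reinterpret_cast<uintptr_t>( p ) & 1 ) != 0;
    }
    inline void * unmark( void * p )
    {
        return reinterpret_cast<void *>( reinterpret_cast<uintptr_t>( p ) & ~uintptr_t( 1 ) );
    }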