2008.02.10 Maxim.Khiszinsky Created
*/
-#include <cds/gc/hzp/hzp.h>
+#include <cds/gc/hp/hp.h>
#include <algorithm> // std::sort
#include "hzp_const.h"
// Destructor: unlinks the global hazard-pointer record list and deletes
// every record in it.
//
// NOTE(review): this span is unresolved diff residue — lines prefixed '-'
// (old CDS_DEBUG_DO / CDS_ATOMIC spelling) and '+' (new CDS_DEBUG_ONLY /
// atomics:: spelling) are BOTH still present, so the file cannot compile
// as-is. Presumably the '+' lines are the intended survivors — confirm
// against the upstream migration before resolving. Code left byte-identical.
GarbageCollector::~GarbageCollector()
{
// Debug-only thread ids consumed by the ownership assert below.
- CDS_DEBUG_DO( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
- CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
+ CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
+ CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
// Detach the whole list by swapping the head to nullptr; relaxed ordering
// is presumably safe because destruction runs single-threaded — TODO confirm.
- hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
- m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
+ m_pListHead.store( nullptr, atomics::memory_order_relaxed );
hplist_node * pNext = nullptr;
for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
// Each record must be unowned, owned by the destroying (main) thread,
// or owned by a thread that is no longer alive.
- assert( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
- || hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
- || !cds::OS::isThreadAlive( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) )
+ assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+ || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+ || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
);
details::retired_vector& vect = hprec->m_arrRetired;
// NOTE(review): itRetired is declared but never used, and the stray '}'
// on the next line closes the for-loop before vect.clear() — the hunk
// that disposed of the retired pointers appears to have been lost in the
// merge. The statements below reference vect/hprec out of scope.
details::retired_vector::iterator itRetired = vect.begin();
}
vect.clear();
pNext = hprec->m_pNextNode;
- hprec->m_bFree.store( true, CDS_ATOMIC::memory_order_relaxed );
+ hprec->m_bFree.store( true, atomics::memory_order_relaxed );
DeleteHPRec( hprec );
}
}
// NOTE(review): fragment of an HP-record allocation routine — the function
// signature (and the declaration of 'hprec'/'nullThreadId') is not visible
// in this chunk, and unresolved '-'/'+' diff markers remain. Left
// byte-identical pending merge resolution (presumably keep the '+' lines).
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
// First try to reuse a retired (non-active) HP record
- for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
// Claim the record by CAS-ing its owner from "no owner" to this thread;
// a failed CAS means another thread owns (or just claimed) it.
cds::OS::ThreadId thId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
continue;
// Mark the claimed record as in-use before handing it out.
- hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_release );
+ hprec->m_bFree.store( false, atomics::memory_order_release );
return hprec;
}
// No HP records available for reuse
// Allocate and push a new HP record
hprec = NewHPRec();
- hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
- hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_relaxed );
+ hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
+ hprec->m_bFree.store( false, atomics::memory_order_relaxed );
// Release fence so the relaxed initialization above is visible to any
// thread that subsequently observes this record in the list.
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
// Lock-free push of the new record onto the list head.
- hplist_node * pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );
+ hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
do {
hprec->m_pNextNode = pOldHead;
- } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
return hprec;
}
// NOTE(review): tail of an HP-record retire routine (signature not visible
// in this chunk). Clears the record's guards, scans its retired pointers,
// then releases ownership. Unresolved '-'/'+' diff markers remain; left
// byte-identical.
pRec->clear();
Scan( pRec );
hplist_node * pNode = static_cast<hplist_node *>( pRec );
// Release the record back to "no owner" so another thread can reuse it.
- pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+ pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
}
// Walks the HP-record list and retires every record that still has an
// owner thread.
//
// NOTE(review): the function's closing brace is not visible in this chunk
// (the text runs straight into Scan-stage code), and unresolved '-'/'+'
// diff markers remain. Left byte-identical.
void GarbageCollector::detachAllThread()
{
hplist_node * pNext = nullptr;
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
pNext = hprec->m_pNextNode;
// Only records still bound to some thread need retiring.
- if ( hprec->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
+ if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
RetireHPRec( hprec );
}
}
// NOTE(review): this span interleaves fragments of (at least) two routines —
// a Scan pass over the HP list (stage 1/stage 2, whose loop bodies were lost
// in the merge: the same pNode/while/for opening appears twice with no
// closing braces) and a HelpScan routine that adopts records abandoned by
// dead threads. Unresolved '-'/'+' diff markers remain throughout. Left
// byte-identical pending merge resolution.
// Stage 1: Scan HP list and insert non-null values in plist
- hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
// Search guarded pointers in retired array
- hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
{
// HelpScan fragment: the caller (pThis) must own its own record.
CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
- assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+ assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
- for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
// If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
- if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_acquire) )
+ if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
continue;
// Owns hprec if it is empty.
// Several threads may work concurrently so we use atomic technique only.
{
- cds::OS::ThreadId curOwner = hprec->m_idOwner.load(CDS_ATOMIC::memory_order_acquire);
+ cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
// Record is unowned or its owner died: try to capture it.
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
continue;
}
else {
// NOTE(review): resetting curOwner to nullThreadId and retrying
// the same CAS appears to be a second capture attempt in case
// the owner released the record between the load and here —
// confirm against upstream before resolving this hunk.
curOwner = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
continue;
}
}
}
// NOTE(review): 'src' is not declared in the visible text — the hunk that
// transferred the captured record's retired pointers was presumably lost.
src.clear();
// Mark the drained record free and release ownership.
- hprec->m_bFree.store(true, CDS_ATOMIC::memory_order_release);
- hprec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
+ hprec->m_bFree.store(true, atomics::memory_order_release);
+ hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
}
}
// NOTE(review): fragment of an internal-statistics routine, truncated at the
// end of this chunk (the loop/function close is not visible) and still
// carrying unresolved '-'/'+' diff markers. Left byte-identical.
// Chained assignment zeroes both counters before accumulation.
stat.nTotalRetiredPtrCount =
stat.nRetiredPtrInFreeHPRecs = 0;
- for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
++stat.nHPRecAllocated;
stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
- if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_relaxed) ) {
+ if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
// Free HP record
stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
}