#include <algorithm> // std::fill
#include <functional> // std::hash
#include <cds/gc/details/dhp.h>
#include <cds/algo/int_algo.h>
namespace cds { namespace gc { namespace dhp {
/// Inserts \p node into the liberate set, folding duplicate retirements together.
///
/// If a node retiring the same raw pointer (m_ptr.m_p) is already present in the
/// bucket, \p node is spliced into that node's m_pNextFree list rather than the
/// bucket chain, so every retirement of one address is freed as a group.
/// Otherwise \p node is pushed at the head of its bucket's m_pNext chain.
///
/// NOTE(review): fixed unresolved diff markers left in this function; the
/// resolved form uses atomic m_pNext/m_pNextFree accessors. All accesses use
/// memory_order_relaxed — presumably the set is touched only by the scanning
/// thread after privatization, so no inter-thread ordering is needed; confirm
/// against the caller.
void insert( retired_ptr_node& node )
{
    node.m_pNext.store( nullptr, atomics::memory_order_relaxed );

    item_type& refBucket = bucket( node );
    if ( refBucket ) {
        // Walk the bucket chain looking for a node that retires the same address.
        item_type p = refBucket;
        do {
            if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
                // Duplicate address: splice node into p's free-list.
                assert( node.m_pNextFree.load( atomics::memory_order_relaxed ) == nullptr );

                node.m_pNextFree.store( p->m_pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                p->m_pNextFree.store( &node, atomics::memory_order_relaxed );
                return;
            }
            p = p->m_pNext.load( atomics::memory_order_relaxed );
        } while ( p );

        // No duplicate found: push node at the head of the bucket chain.
        node.m_pNext.store( refBucket, atomics::memory_order_relaxed );
    }
    refBucket = &node;
}
// NOTE(review): fragment — the enclosing function's header is outside this
// chunk; from the identifiers (`ptr`, `pPrev`, `refBucket`) this is the tail of
// the erase-by-pointer routine: unlink and return the node chain whose retired
// pointer equals `ptr`, or nullptr when absent. Unresolved diff markers were
// removed by keeping the '+' (atomic-accessor) lines.
while ( p ) {
    if ( p->m_ptr.m_p == ptr ) {
        // Unlink p from the bucket chain (interior vs. head of bucket).
        if ( pPrev )
            pPrev->m_pNext.store( p->m_pNext.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
        else
            refBucket = p->m_pNext.load( atomics::memory_order_relaxed );
        p->m_pNext.store( nullptr, atomics::memory_order_relaxed );
        return p;
    }
    pPrev = p;
    p = p->m_pNext.load( atomics::memory_order_relaxed );
}
return nullptr;
// NOTE(review): fragment — the surrounding bucket loop that yields `pBucket`
// and the declarations of `ret`/`pTail` are outside this chunk. Unresolved
// diff markers were removed by keeping the '+' (atomic-accessor) lines; the
// '+' hunk also split the old `pTail = pTail->m_pNextFree = pNext;` combined
// assignment into two explicit atomic steps.
if ( !ret.first )
    ret.first = pBucket;
else
    pTail->m_pNextFree.store( pBucket, atomics::memory_order_relaxed );
pTail = pBucket;

for (;;) {
    // Remember the next bucket-chain node before freeing pTail's payload.
    item_type pNext = pTail->m_pNext.load( atomics::memory_order_relaxed );
    pTail->m_ptr.free();
    pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );

    // Free every duplicate retirement linked through m_pNextFree.
    while ( pTail->m_pNextFree.load( atomics::memory_order_relaxed )) {
        pTail = pTail->m_pNextFree.load( atomics::memory_order_relaxed );
        pTail->m_ptr.free();
        pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
    }

    if ( pNext ) {
        // Chain the freed nodes into one result list and advance.
        pTail->m_pNextFree.store( pNext, atomics::memory_order_relaxed );
        pTail = pNext;
    }
    else
        break;
}
}

// Terminate the resulting free-list.
if ( pTail )
    pTail->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
ret.second = pTail;
return ret;
}
/// Destroys the collector singleton.
///
/// NOTE(review): resolved leftover diff markers. The '+' form drops the null
/// check — `delete nullptr` is a well-defined no-op — and re-nulls m_pManager
/// so repeated Destruct() calls stay safe.
void CDS_STDCALL GarbageCollector::Destruct()
{
    delete m_pManager;
    m_pManager = nullptr;
}
/// Constructs the collector.
///
/// @param nLiberateThreshold       retired-pointer count that triggers a scan; 0 selects the default 1024
/// @param nInitialThreadGuardCount initial per-thread guard count; 0 selects the default 8
///
/// NOTE(review): resolved leftover diff markers; the '+' hunk adds the
/// m_bStatEnabled(false) initializer so statistics start disabled.
GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
    : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
    , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
    , m_bStatEnabled( false )   // statistics are off by default
    //, m_nInLiberate(0)
{
}
/// Destructor: performs a final scan() (renamed from liberate() by this
/// change) to release any still-retired pointers before shutdown.
/// NOTE(review): resolved leftover diff markers by keeping the '+' line.
GarbageCollector::~GarbageCollector()
{
    scan();
}
// NOTE(review): unresolved diff/merge residue — this function still carries
// '-'/'+' patch markers and will not compile as-is. The hunk also looks
// incomplete: `nLiberateThreshold` is read below but its declaration (a load
// of m_nLiberateThreshold) is not visible in this chunk, the `if ( pRetired )`
// guard expected around the busy-list splice is absent, and the brace balance
// of the '+' lines does not close cleanly before the trailing `else`. Do NOT
// resolve by hand — re-apply the original patch. Intent of the change, as far
// as the hunk shows: rename liberate() -> scan(); switch m_pNext/m_pNextFree
// to atomic accessors with relaxed ordering; count retired nodes
// (nRetiredCount); collect guarded ("busy") retired nodes into one list
// [pBusyFirst, pBusyLast] and return them via a single push_list() instead of
// per-node push(); and only double the scan threshold when at least
// nLiberateThreshold retired pointers were seen.
- void GarbageCollector::liberate()
+ void GarbageCollector::scan()
{
details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
if ( retiredList.first ) {
details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) );
// Get list of retired pointers
+ size_t nRetiredCount = 0;
details::retired_ptr_node * pHead = retiredList.first;
while ( pHead ) {
- details::retired_ptr_node * pNext = pHead->m_pNext;
- pHead->m_pNextFree = nullptr;
+ details::retired_ptr_node * pNext = pHead->m_pNext.load( atomics::memory_order_relaxed );
+ pHead->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
set.insert( *pHead );
pHead = pNext;
+ ++nRetiredCount;
}
// Liberate cycle
+
+ details::retired_ptr_node * pBusyFirst = nullptr;
+ details::retired_ptr_node * pBusyLast = nullptr;
+ size_t nBusyCount = 0;
+
for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
{
// get guarded pointer
- details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
+ details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
if ( valGuarded ) {
details::retired_ptr_node * pRetired = set.erase( valGuarded );
// pRetired is the head of retired pointers list for which the m_ptr.m_p field is equal
// List is linked on m_pNextFree field
- do {
- details::retired_ptr_node * pNext = pRetired->m_pNextFree;
- m_RetiredBuffer.push( *pRetired );
- pRetired = pNext;
- } while ( pRetired );
+ if ( pBusyLast )
+ pBusyLast->m_pNext.store( pRetired, atomics::memory_order_relaxed );
+ else
+ pBusyFirst = pRetired;
+ pBusyLast = pRetired;
+ ++nBusyCount;
+ details::retired_ptr_node * p = pBusyLast->m_pNextFree.load(atomics::memory_order_relaxed);
+ while ( p != nullptr ) {
+ pBusyLast->m_pNext.store( p, atomics::memory_order_relaxed );
+ pBusyLast = p;
+ ++nBusyCount;
+ }
}
}
}
+ // Place [pBusyList, pBusyLast] back to m_RetiredBuffer
+ if ( pBusyFirst )
+ m_RetiredBuffer.push_list( pBusyFirst, pBusyLast, nBusyCount );
+
// Free all retired pointers
details::liberate_set::list_range range = set.free_all();
assert( range.second != nullptr );
m_RetiredAllocator.free_range( range.first, range.second );
}
- else {
- // liberate cycle did not free any retired pointer - double liberate threshold
+ else if ( nRetiredCount >= nLiberateThreshold ) {
+ // scan() cycle did not free any retired pointer - double scan() threshold
m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
}
}