6 Implementation of cds::gc::hrc::GarbageCollector (hazard-pointer + reference-counting GC)
9 2008.03.10 Maxim.Khiszinsky Created
12 #include <cds/gc/hrc/hrc.h>
14 #include "hzp_const.h"
16 #include <algorithm> // std::sort
18 #define CDS_HRC_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
20 namespace cds { namespace gc {
// Singleton instance; created by Construct(), deleted by Destruct().
23 GarbageCollector * GarbageCollector::m_pGC = nullptr;
// Constructor: records the per-GC configuration limits.
// nHazardPtrCount       - number of hazard pointers per thread
// nMaxThreadCount       - maximum number of threads the GC supports
// nRetiredNodeArraySize - capacity of each thread's retired-node array
// The thread-descriptor list starts empty; statistics are enabled by default.
25 GarbageCollector::GarbageCollector(
26 size_t nHazardPtrCount,
27 size_t nMaxThreadCount,
28 size_t nRetiredNodeArraySize
30 : m_pListHead( nullptr ),
31 m_bStatEnabled( true ),
32 m_nHazardPointerCount( nHazardPtrCount ),
33 m_nMaxThreadCount( nMaxThreadCount ),
34 m_nMaxRetiredPtrCount( nRetiredNodeArraySize )
// Destructor: walks the thread-descriptor list, freeing any still-retired
// nodes of each record and then deleting the record itself.
// Runs single-threaded: no other thread may use the GC at this point.
37 GarbageCollector::~GarbageCollector()
39 thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
// Every record must already be released (owner id reset to the null thread id)
// before the GC is destroyed.
41 assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
42 clearHRCThreadDesc( pNode );
// Grab the next link before deleting the current record.
43 thread_list_node * pNext = pNode->m_pNext;
44 deleteHRCThreadDesc( pNode );
// Creates the GarbageCollector singleton.
// Zero arguments are replaced by the compile-time defaults
// (c_nHazardPointerPerThread, c_nMaxThreadCount, etc.).
// The retired-array size is derived from the other limits: each thread may
// hold up to (hazard ptrs + node links + transient links + 1) retired nodes.
49 void CDS_STDCALL GarbageCollector::Construct(
50 size_t nHazardPtrCount, // hazard pointers count
51 size_t nMaxThreadCount, // max thread count
52 size_t nMaxNodeLinkCount, // max HRC-pointer count in the HRC-container's item
53 size_t nMaxTransientLinks // max HRC-container's item count that can point to deleting item of container
57 if ( nHazardPtrCount == 0 )
58 nHazardPtrCount = c_nHazardPointerPerThread + c_nCleanUpHazardPointerPerThread;
59 if ( nMaxThreadCount == 0 )
60 nMaxThreadCount = c_nMaxThreadCount;
61 if ( nMaxNodeLinkCount == 0 )
62 nMaxNodeLinkCount = c_nHRCMaxNodeLinkCount;
63 if ( nMaxTransientLinks == 0 )
64 nMaxTransientLinks = c_nHRCMaxTransientLinks;
66 size_t nRetiredNodeArraySize = nMaxThreadCount * ( nHazardPtrCount + nMaxNodeLinkCount + nMaxTransientLinks + 1 );
68 m_pGC = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nRetiredNodeArraySize );
// Destroys the singleton. A temporary ThreadGC (tgc, declared on an elided
// line) runs a final HelpScan pass so that retired nodes left behind by
// detached threads are reclaimed before the GC itself is deleted.
72 void CDS_STDCALL GarbageCollector::Destruct()
78 m_pGC->HelpScan( &tgc );
80 // tgc dtor calls fini()
// Allocates a brand-new thread descriptor record (counted in statistics).
// The record is not yet linked into the descriptor list.
88 inline GarbageCollector::thread_list_node * GarbageCollector::newHRCThreadDesc()
90 CDS_HRC_STATISTIC( ++m_Stat.m_AllocNewHRCThreadDesc );
91 return new thread_list_node( *this );
// Frees a thread descriptor record (counted in statistics).
// The hazard-pointer array must be intact (size == capacity) - it is
// allocated once at full capacity and never shrunk.
94 inline void GarbageCollector::deleteHRCThreadDesc( thread_list_node * pNode )
96 assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
97 CDS_HRC_STATISTIC( ++m_Stat.m_DeleteHRCThreadDesc );
// Frees every retired node still held in pNode's retired array and resets
// the corresponding slots to nullptr. Relaxed ordering is sufficient: this
// is called only when no other thread can access the record (GC destruction).
101 void GarbageCollector::clearHRCThreadDesc( thread_list_node * pNode )
103 assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
104 ContainerNode * pItem;
105 for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) {
106 if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed )) != nullptr ) {
// m_funcFree is the type-specific disposer registered when the node was retired.
107 pNode->m_arrRetired[n].m_funcFree( pItem );
109 pNode->m_arrRetired[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
112 assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
// Linear search of the descriptor list for the record owned by the calling
// thread. Returns it if found (return statements are on elided lines);
// an owned record must not be marked free.
115 GarbageCollector::thread_list_node * GarbageCollector::getHRCThreadDescForCurrentThread() const
117 thread_list_node * hprec;
118 const std::thread::id curThreadId = std::this_thread::get_id();
120 for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
121 if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) {
122 assert( !hprec->m_bFree );
// Attaches the calling thread to the GC: first tries to claim a retired
// (owner == null thread id) record via CAS on m_idOwner; failing that,
// allocates a fresh record and pushes it onto the lock-free descriptor list.
// Returns the record bound to pThreadGC.
129 details::thread_descriptor * GarbageCollector::allocateHRCThreadDesc( ThreadGC * pThreadGC )
131 CDS_HRC_STATISTIC( ++m_Stat.m_AllocHRCThreadDesc );
133 thread_list_node * hprec;
134 const std::thread::id nullThreadId = std::thread::id();
135 const std::thread::id curThreadId = std::this_thread::get_id();
137 // First try to reuse a retired (non-active) HP record
138 for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
139 std::thread::id expectedThreadId = nullThreadId;
// CAS from null-id to our id claims the record; acq_rel pairs with the
// releasing store in retireHRCThreadDesc().
140 if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) )
142 hprec->m_pOwner = pThreadGC;
143 hprec->m_bFree = false;
144 assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
148 // No HP records available for reuse
149 // Allocate and push a new HP record
150 hprec = newHRCThreadDesc();
151 assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
// Relaxed is enough here: the record is not yet visible to other threads;
// publication happens via the release CAS on m_pListHead below.
152 hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
153 hprec->m_pOwner = pThreadGC;
154 hprec->m_bFree = false;
155 thread_list_node * pOldHead;
// Standard lock-free stack push: the list head only ever grows, records are
// never unlinked, so a simple CAS loop suffices.
157 pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
159 hprec->m_pNext = pOldHead;
160 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
162 assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
// Detaches the calling thread from its descriptor record. The record stays
// in the list for reuse; ownership is released by storing the null thread id
// with release semantics (pairs with the acquiring CAS in
// allocateHRCThreadDesc / HelpScan).
166 void GarbageCollector::retireHRCThreadDesc( details::thread_descriptor * pRec )
168 CDS_HRC_STATISTIC( ++m_Stat.m_RetireHRCThreadDesc );
171 thread_list_node * pNode = static_cast<thread_list_node *>( pRec );
172 assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
175 pNode->m_idOwner.value() != std::this_thread::get_id()
176 if the destruction of thread object is called by the destructor
177 after thread termination
// The record must currently be owned by somebody (non-null id) - but not
// necessarily by the calling thread, see the note above.
179 assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != std::thread::id() );
180 pNode->m_pOwner = nullptr;
181 pNode->m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
182 assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
// Core reclamation pass over the calling thread's retired array:
//   1. mark zero-refcount retired nodes as "traced" (with a re-check of the
//      refcount to close the race with a concurrent increment);
//   2. snapshot all threads' hazard pointers into a sorted vector;
//   3. free every retired node that has refcount 0, is still traced, is not
//      hazard-protected anywhere, and is not claimed by a CleanUpAll pass.
// Must be called by the thread owning pThreadGC->m_pDesc (asserted below).
185 void GarbageCollector::Scan( ThreadGC * pThreadGC )
187 CDS_HRC_STATISTIC( ++m_Stat.m_ScanCalls );
189 typedef std::vector< ContainerNode * > hazard_ptr_list;
191 details::thread_descriptor * pRec = pThreadGC->m_pDesc;
192 assert( static_cast< thread_list_node *>(pRec)->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::this_thread::get_id() );
194 // Step 1: mark all pRec->m_arrRetired items as "traced"
196 details::retired_vector::const_iterator itEnd = pRec->m_arrRetired.end();
198 for ( details::retired_vector::const_iterator it = pRec->m_arrRetired.begin() ; it != itEnd; ++it ) {
199 ContainerNode * pNode = it->m_pNode.load( CDS_ATOMIC::memory_order_acquire );
201 if ( pNode->m_RC.value() == 0 ) {
202 pNode->m_bTrace.store( true, CDS_ATOMIC::memory_order_release );
// Re-check: if the refcount became non-zero after we set the trace flag,
// another thread revived the node - undo the mark.
203 if ( pNode->m_RC.value() != 0 )
204 pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
210 // Array of hazard pointers for all threads
211 hazard_ptr_list plist;
212 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
213 assert( plist.size() == 0 );
215 // Stage 2: Scan HP list and insert non-null values to plist
217 thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );
220 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
221 ContainerNode * hptr = pNode->m_hzp[i];
223 plist.push_back( hptr );
225 pNode = pNode->m_pNext;
229 // Sort plist to simplify search in
230 std::sort( plist.begin(), plist.end() );
232 // Stage 3: Deletes all nodes for refCount == 0 and that do not declare as Hazard in other thread
234 details::retired_vector& arr = pRec->m_arrRetired;
236 hazard_ptr_list::iterator itHPBegin = plist.begin();
237 hazard_ptr_list::iterator itHPEnd = plist.end();
239 details::retired_vector::iterator itEnd = arr.end();
240 details::retired_vector::iterator it = arr.begin();
242 for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
243 details::retired_node& node = *it;
244 ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
// binary_search over the sorted snapshot: node is safe to free only if no
// thread currently protects it with a hazard pointer.
248 if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(CDS_ATOMIC::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) {
249 // pNode may be destructed safely
251 node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
// m_nClaim == 0 means no concurrent CleanUpAll holds a claim on this slot;
// only then may the node be terminated and physically freed.
252 if ( node.m_nClaim.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) {
253 pNode->terminate( pThreadGC, false );
254 pNode->clean( CDS_ATOMIC::memory_order_relaxed );
255 node.m_funcFree( pNode );
258 CDS_HRC_STATISTIC( ++m_Stat.m_DeletedNode );
// Claimed by a concurrent CleanUpAll: terminate concurrently, defer the free.
262 pNode->terminate( pThreadGC, true );
263 //node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
264 CDS_HRC_STATISTIC( ++m_Stat.m_ScanClaimGuarded );
267 CDS_HRC_STATISTIC( ++m_Stat.m_ScanGuarded );
// Adopts retired nodes abandoned in free (unowned) descriptor records:
// claims each free record via CAS on m_idOwner, moves its retired pointers
// into pThis's private retired array (running local clean-up when that array
// fills), then releases the record again. Runs only when pThis's own retired
// array is full.
273 void GarbageCollector::HelpScan( ThreadGC * pThis )
275 if ( pThis->m_pDesc->m_arrRetired.isFull() )
278 CDS_HRC_STATISTIC( ++m_Stat.m_HelpScanCalls );
280 const std::thread::id nullThreadId = std::thread::id();
281 const std::thread::id curThreadId = std::this_thread::get_id();
283 for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext )
285 // If threadDesc is free then own its
286 std::thread::id expectedThreadId = nullThreadId;
// CAS succeeds only for records whose owner id is the null thread id,
// i.e. records released by retireHRCThreadDesc().
287 if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) )
292 // We own threadDesc.
293 assert( pRec->m_pOwner == nullptr );
295 if ( !pRec->m_bFree ) {
296 // All undeleted pointers is moved to pThis (it is private for the current thread)
298 details::retired_vector& src = pRec->m_arrRetired;
299 details::retired_vector& dest = pThis->m_pDesc->m_arrRetired;
300 assert( !dest.isFull());
302 details::retired_vector::iterator itEnd = src.end();
303 details::retired_vector::iterator it = src.begin();
305 for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
// Skip empty slots.
306 if ( it->m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr )
309 dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree );
// Make room in our own array before adopting more pointers.
312 while ( dest.isFull() ) {
313 pThis->cleanUpLocal();
// Record fully drained: mark free and release ownership (release store
// pairs with the acquiring CAS above and in allocateHRCThreadDesc).
322 pRec->m_bFree = true;
324 pRec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
// Walks every descriptor record and runs cleanUp() on each retired node that
// is not yet finished (m_bDone false). The claim counter protocol guards
// against a concurrent Scan freeing the node mid-clean-up:
//   increment m_nClaim -> re-check m_bDone and that the slot still holds the
//   same node -> cleanUp -> decrement m_nClaim.
328 void GarbageCollector::CleanUpAll( ThreadGC * pThis )
330 CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls );
332 //const std::thread::id nullThreadId = std::thread::id();
333 thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
335 for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) {
336 details::retired_node& rRetiredNode = pThread->m_arrRetired[i];
337 ContainerNode * pNode = rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
338 if ( pNode && !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) {
339 rRetiredNode.m_nClaim.fetch_add( 1, CDS_ATOMIC::memory_order_release );
// Re-validate after claiming: the owner's Scan may have completed the slot
// or reused it for a different node in the meantime.
340 if ( !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire)
341 && pNode == rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire) )
343 pNode->cleanUp( pThis );
345 rRetiredNode.m_nClaim.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
348 pThread = pThread->m_pNext;
// Fills stat with a snapshot of the GC configuration, the descriptor-list
// census (allocated records, retired-pointer totals split by free/active
// records) and the accumulated event counters. The walk is unsynchronized,
// so the numbers are approximate under concurrent activity.
352 GarbageCollector::internal_state& GarbageCollector::getInternalState( GarbageCollector::internal_state& stat) const
355 stat.nHPCount = m_nHazardPointerCount;
356 stat.nMaxThreadCount = m_nMaxThreadCount;
357 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
358 stat.nHRCRecSize = sizeof( thread_list_node )
359 + sizeof( details::retired_node) * m_nMaxRetiredPtrCount;
360 stat.nHRCRecAllocated =
362 stat.nTotalRetiredPtrCount =
363 stat.nRetiredPtrInFreeHRCRecs = 0;
365 // Walk through HRC records
366 for ( thread_list_node *hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNext ) {
367 ++stat.nHRCRecAllocated;
368 size_t nRetiredNodeCount = hprec->m_arrRetired.retiredNodeCount();
369 if ( hprec->m_bFree ) {
370 stat.nRetiredPtrInFreeHRCRecs += nRetiredNodeCount;
375 stat.nTotalRetiredPtrCount += nRetiredNodeCount;
// Copy out the event counters accumulated under CDS_HRC_STATISTIC.
379 stat.evcAllocHRCRec = m_Stat.m_AllocHRCThreadDesc;
380 stat.evcRetireHRCRec = m_Stat.m_RetireHRCThreadDesc;
381 stat.evcAllocNewHRCRec = m_Stat.m_AllocNewHRCThreadDesc;
382 stat.evcDeleteHRCRec = m_Stat.m_DeleteHRCThreadDesc;
383 stat.evcScanCall = m_Stat.m_ScanCalls;
384 stat.evcHelpScanCalls = m_Stat.m_HelpScanCalls;
385 stat.evcCleanUpAllCalls = m_Stat.m_CleanUpAllCalls;
386 stat.evcDeletedNode = m_Stat.m_DeletedNode;
387 stat.evcScanGuarded = m_Stat.m_ScanGuarded;
388 stat.evcScanClaimGuarded = m_Stat.m_ScanClaimGuarded;
391 stat.evcNodeConstruct = m_Stat.m_NodeConstructed;
392 stat.evcNodeDestruct = m_Stat.m_NodeDestructed;
// "Pure virtual" placeholder: containers must override cleanUp(); reaching
// this body aborts with a diagnostic.
398 void ContainerNode::cleanUp( ThreadGC * /*pGC*/ )
400 CDS_PURE_VIRTUAL_FUNCTION_CALLED_("cds::gc::hrc::ContainerNode::cleanUp");
// "Pure virtual" placeholder: containers must override terminate(); reaching
// this body aborts with a diagnostic.
402 void ContainerNode::terminate( ThreadGC * /*pGC*/, bool /*bConcurrent*/ )
404 CDS_PURE_VIRTUAL_FUNCTION_CALLED_("cds::gc::hrc::ContainerNode::terminate");
408 }} // namespace cds::gc