6 Hazard Pointers memory reclamation strategy implementation
9 2008.02.10 Maxim.Khiszinsky Created
12 #include <cds/gc/details/hp.h>
14 #include <algorithm> // std::sort
17 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
19 namespace cds { namespace gc {
22 /// Max array size of retired pointers
// Default capacity of a per-thread retired-pointer array: one slot for every
// hazard pointer of every thread, doubled for slack. Used as the lower bound
// for m_nMaxRetiredPtrCount in the constructor below.
23 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
// Process-wide singleton instance of the Hazard Pointer manager.
// Created by Construct() and reset to nullptr by Destruct().
25 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
// Creates the singleton GarbageCollector if it does not already exist.
// Zero values for the count parameters select compile-time defaults
// (see the constructor's initializer list).
// NOTE(review): the null-check + new is not synchronized — presumably this is
// required to be called once, before threads start; confirm with callers.
27 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
29 if ( !m_pHZPManager ) {
30 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
// Destroys the singleton. When bDetachAll is set (the visible call site
// suggests it gates detachAllThread — the surrounding condition is not
// visible here), every attached thread's HP record is detached first so
// their retired pointers can be reclaimed by the destructor.
34 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
36 if ( m_pHZPManager ) {
38 m_pHZPManager->detachAllThread();
// Reset the singleton pointer so a later Construct() can recreate it.
41 m_pHZPManager = nullptr;
// Constructor: records the GC configuration, substituting compile-time
// defaults for zero arguments. Statistics collection starts enabled.
45 GarbageCollector::GarbageCollector(
46 size_t nHazardPtrCount,
47 size_t nMaxThreadCount,
48 size_t nMaxRetiredPtrCount,
// Thread list starts empty; records are pushed in alloc_hp_record().
51 : m_pListHead( nullptr )
52 ,m_bStatEnabled( true )
// 0 means "use default" for the two counts below.
53 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
54 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
// Retired-array capacity is clamped from below to c_nMaxRetireNodeCount,
// i.e. max(nMaxRetiredPtrCount, default).
55 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
// Selects classic_scan or inplace_scan (see Scan dispatch elsewhere).
56 ,m_nScanType( nScanType )
// Destructor: tears down the whole HP-record list and frees every pending
// retired pointer. Runs single-threaded by assumption — the asserts below
// verify (in debug builds only) that no other live thread still owns a record.
59 GarbageCollector::~GarbageCollector()
61 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
62 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
// Detach the list head first; relaxed ordering is sufficient since no
// concurrent access is expected during destruction.
64 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
65 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
67 hplist_node * pNext = nullptr;
68 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
// A record may only be owned by nobody, by the destructing (main) thread,
// or by a thread that has already died.
69 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
70 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
71 || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
// Drain this record's retired-pointer array (the per-item free call is in
// a line not visible here — presumably itRetired->free(); confirm upstream).
73 details::retired_vector& vect = hprec->m_arrRetired;
74 details::retired_vector::iterator itRetired = vect.begin();
75 details::retired_vector::iterator itRetiredEnd = vect.end();
76 while ( itRetired != itRetiredEnd ) {
// Remember the successor before the record is released.
81 pNext = hprec->m_pNextNode;
82 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
// Allocates a fresh HP-record list node bound to this GarbageCollector.
// Counted in m_AllocNewHPRec when statistics are enabled.
87 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
89 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
90 return new hplist_node( *this );
// Destroys an HP-record node. The record's retired array must already be
// empty — nodes with pending retired pointers must be drained first.
// (The actual delete statement is on a line not visible in this view.)
93 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
95 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
96 assert( pNode->m_arrRetired.size() == 0 );
// Attaches the calling thread to the GC: returns an HP record owned by the
// current thread. First tries to claim an unowned record from the global
// list via CAS on m_idOwner; if none is free, allocates a new record
// (allocation line not visible here) and pushes it onto the lock-free list.
100 details::hp_record * GarbageCollector::alloc_hp_record()
102 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
105 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
106 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
108 // First try to reuse a retired (non-active) HP record
109 for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
110 cds::OS::ThreadId thId = nullThreadId;
// CAS null -> curThreadId claims the record; failure means another thread
// owns (or just claimed) it, so keep walking the list.
111 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
// Claimed: mark it in-use so HelpScan skips it.
113 hprec->m_bFree.store( false, atomics::memory_order_release );
117 // No HP records available for reuse
118 // Allocate and push a new HP record
// The new record is not yet visible to other threads, so relaxed stores
// suffice; the fence below publishes its fields before the list CAS.
120 hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
121 hprec->m_bFree.store( false, atomics::memory_order_relaxed );
123 atomics::atomic_thread_fence( atomics::memory_order_release );
// Standard lock-free push onto the singly-linked list head.
125 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
127 hprec->m_pNextNode = pOldHead;
128 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Detaches the record from its owner thread: releasing the owner id back to
// c_NullThreadId makes the record reclaimable by alloc_hp_record()/HelpScan.
// The record itself stays on the global list (records are never unlinked
// while the GC is alive). Lines between the assert and the store are not
// visible in this view.
133 void GarbageCollector::free_hp_record( details::hp_record * pRec )
135 assert( pRec != nullptr );
136 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
141 hplist_node * pNode = static_cast<hplist_node *>( pRec );
// Release store publishes all prior writes to the record before it is
// handed back for reuse by another thread.
142 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
// Forcibly detaches every still-owned HP record (used from Destruct()).
// Walks the global list and releases each record whose owner id is non-null.
// Safe only when no other thread is concurrently using the GC.
145 void GarbageCollector::detachAllThread()
147 hplist_node * pNext = nullptr;
148 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
149 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
150 pNext = hprec->m_pNextNode;
151 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
152 free_hp_record( hprec );
// Classic two-stage scan from Michael's hazard-pointer algorithm:
// Stage 1 collects every currently-guarded pointer from all threads into a
// sorted vector; Stage 2 walks this thread's retired array, frees pointers
// not found in the guarded set, and re-pushes ("defers") those still guarded.
// The clear()/free lines of stage 2 fall in gaps of this sampled view.
157 void GarbageCollector::classic_scan( details::hp_record * pRec )
159 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
// Worst-case size: every hazard slot of every thread holds a pointer.
161 std::vector< void * > plist;
162 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
163 assert( plist.size() == 0 );
165 // Stage 1: Scan HP list and insert non-null values in plist
167 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
170 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
171 void * hptr = pNode->m_hzp[i];
// Only non-null slots are recorded (the guarding 'if' is in a sampled-out
// line; push_back here is conditional on hptr being non-null).
173 plist.push_back( hptr );
175 pNode = pNode->m_pNextNode;
178 // Sort plist to simplify search in
// Sorting enables O(log n) binary_search per retired pointer below.
179 std::sort( plist.begin(), plist.end() );
181 // Stage 2: Search plist
182 details::retired_vector& arrRetired = pRec->m_arrRetired;
184 details::retired_vector::iterator itRetired = arrRetired.begin();
185 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
186 // arrRetired is not a std::vector!
187 // clear() is just set up item counter to 0, the items is not destroyed
191 std::vector< void * >::iterator itBegin = plist.begin();
192 std::vector< void * >::iterator itEnd = plist.end();
193 size_t nDeferredCount = 0;
194 while ( itRetired != itRetiredEnd ) {
// Still guarded by some thread: keep it for a later scan. Re-pushing into
// the same (just-reset) array is safe because clear() preserved the items.
195 if ( std::binary_search( itBegin, itEnd, itRetired->m_p ) ) {
196 arrRetired.push( *itRetired );
203 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
204 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
// In-place scan: avoids the temporary plist of classic_scan by sorting the
// retired array itself and marking guarded entries via the pointer's LSB.
// Requires all retired pointers to be at least 2-byte aligned; otherwise it
// falls back to classic_scan. Several marking/compaction lines fall in gaps
// of this sampled view.
208 void GarbageCollector::inplace_scan( details::hp_record * pRec )
210 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
212 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
213 // It is correct if all retired pointers are ar least 2-byte aligned (LSB is zero).
214 // If it is wrong, we use classic scan algorithm
216 // Check if all retired pointers has zero LSB
217 // LSB is used for marking pointers that cannot be deleted yet
218 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
219 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
220 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
222 // found a pointer with LSB bit set - use classic_scan
// Fallback and return (the return is in a sampled-out line).
223 classic_scan( pRec );
228 // Sort retired pointer array
229 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
// Debug-only duplicate detection: two equal retired pointers mean the same
// node was retired twice.
236 while ( ++it != itRetiredEnd ) {
237 if ( it->m_p == itPrev->m_p )
238 throw std::runtime_error( "Double free" );
244 // Search guarded pointers in retired array
245 hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
248 details::retired_ptr dummyRetired;
// Free records hold no hazard pointers worth checking.
250 if ( !pNode->m_bFree.load( atomics::memory_order_acquire ) ) {
251 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
252 void * hptr = pNode->m_hzp[i];
// Binary search the sorted retired array for this guarded pointer.
254 dummyRetired.m_p = hptr;
255 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
256 if ( it != itRetiredEnd && it->m_p == hptr ) {
257 // Mark retired pointer as guarded
263 pNode = pNode->m_pNextNode;
267 // Move all marked pointers to head of array
// Compaction: guarded (marked) entries are kept at the front; unguarded
// entries are freed (the free call is in a sampled-out line).
269 auto itInsert = itRetired;
270 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
273 if ( itInsert != it )
278 // Retired pointer may be freed
// Shrink the logical size to the number of still-deferred pointers.
282 const size_t nDeferred = itInsert - itRetired;
283 pRec->m_arrRetired.size( nDeferred );
284 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
285 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
// HelpScan: the calling thread (owner of pThis) adopts retired pointers from
// HP records abandoned by dead or detached threads, moving them into its own
// retired array so they can be reclaimed by a later Scan. This prevents
// retired pointers from being stranded in unowned records.
289 void GarbageCollector::HelpScan( details::hp_record * pThis )
291 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
// pThis must belong to the calling thread.
293 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id() );
295 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
296 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
297 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
299 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
300 if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
303 // Owns hprec if it is empty.
304 // Several threads may work concurrently so we use atomic technique only.
306 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
// Claim the record only if it is unowned or its owner thread has died.
// Two CAS paths: one from the observed dead-owner id, one from null
// (covering a concurrent release between the load and the first CAS).
307 if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
308 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
312 curOwner = nullThreadId;
313 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
318 // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
319 // If it has ones then we move to pThis that is private for current thread.
320 details::retired_vector& src = hprec->m_arrRetired;
321 details::retired_vector& dest = pThis->m_arrRetired;
322 assert( !dest.isFull());
323 details::retired_vector::iterator itRetired = src.begin();
324 details::retired_vector::iterator itRetiredEnd = src.end();
325 while ( itRetired != itRetiredEnd ) {
326 dest.push( *itRetired );
// If our own array fills up, run Scan to drain it before continuing
// (the Scan call itself is in a sampled-out line).
327 if ( dest.isFull()) {
328 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
// Release the adopted record back to the free pool: empty and unowned.
335 hprec->m_bFree.store(true, atomics::memory_order_release);
336 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
// Fills 'stat' with a snapshot of the GC configuration, the current HP-record
// list, and (when enabled) the event counters. Returns the same 'stat' object.
// The list walk uses relaxed/acquire loads only, so counts are approximate
// under concurrent activity.
342 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
// Static configuration.
344 stat.nHPCount = m_nHazardPointerCount;
345 stat.nMaxThreadCount = m_nMaxThreadCount;
346 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
// Per-record footprint: the node itself plus its retired-pointer array.
347 stat.nHPRecSize = sizeof( hplist_node )
348 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
350 stat.nHPRecAllocated =
352 stat.nTotalRetiredPtrCount =
353 stat.nRetiredPtrInFreeHPRecs = 0;
// Walk the global list to tally records and pending retired pointers.
355 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
356 ++stat.nHPRecAllocated;
357 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
359 if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
361 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
// Event counters (meaningful only when statistics are enabled).
370 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
371 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
372 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
373 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
375 stat.evcScanCall = m_Stat.m_ScanCallCount;
376 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
377 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
379 stat.evcDeletedNode = m_Stat.m_DeletedNode;
380 stat.evcDeferredNode = m_Stat.m_DeferredNode;
387 }} // namespace cds::gc