2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 Hazard Pointers memory reclamation strategy implementation
37 2008.02.10 Maxim.Khiszinsky Created
40 #include <cds/gc/details/hp.h>
42 #include <algorithm> // std::sort
45 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
47 namespace cds { namespace gc {
50 /// Max array size of retired pointers
// Derived default capacity: one slot per hazard pointer per thread, doubled.
51 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
// Singleton instance; created by Construct(), reset to nullptr by Destruct().
53 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
55 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
// Lazily creates the GarbageCollector singleton; subsequent calls are no-ops.
// The check-then-create is not synchronized — presumably this is only called
// during single-threaded start-up; verify at call sites.
57 if ( !m_pHZPManager ) {
58 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
62 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
// Tears down the singleton: detaches thread-owned HP records, then clears the
// instance pointer.
64 if ( m_pHZPManager ) {
// NOTE(review): original lines 65 and 67-68 are missing from this extraction —
// presumably the bDetachAll guard around detachAllThread() and the delete of
// m_pHZPManager; confirm against the upstream file.
66 m_pHZPManager->detachAllThread();
69 m_pHZPManager = nullptr;
// Constructor: zero-valued size arguments fall back to the compile-time
// defaults; the retired-pointer capacity is clamped from below by
// c_nMaxRetireNodeCount.
73 GarbageCollector::GarbageCollector(
74 size_t nHazardPtrCount,
75 size_t nMaxThreadCount,
76 size_t nMaxRetiredPtrCount,
// NOTE(review): the scan_type parameter declaration (orig. lines 77-78) is
// missing from this extraction; m_nScanType is initialized from nScanType below.
79 : m_pListHead( nullptr )
80 ,m_bStatEnabled( false )
81 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
82 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
83 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
84 ,m_nScanType( nScanType )
// Destructor: walks the whole HP-record list, asserts that no live foreign
// thread still owns a record, releases each record's retired pointers and
// deletes the record nodes. Runs with no synchronization — the collector must
// already be quiescent.
87 GarbageCollector::~GarbageCollector()
89 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
90 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
// Detach the list head first so no record is reachable while being destroyed.
92 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
93 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
95 hplist_node * pNext = nullptr;
96 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
// A record may only be owned by nobody, by the current (main) thread, or by a
// thread that is no longer alive.
97 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
98 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
99 || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
101 details::retired_vector& vect = hprec->m_arrRetired;
102 details::retired_vector::iterator itRetired = vect.begin();
103 details::retired_vector::iterator itRetiredEnd = vect.end();
104 while ( itRetired != itRetiredEnd ) {
// NOTE(review): the loop body (orig. lines 105-108), which presumably frees
// each retired pointer and advances itRetired, is missing from this extraction.
109 pNext = hprec->m_pNextNode;
110 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
111 DeleteHPRec( hprec );
// Allocates a fresh HP-record list node bound to this collector instance and
// bumps the allocation statistic (when statistics are enabled).
115 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
117 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
118 return new hplist_node( *this );
// Destroys an HP-record node. Precondition: the record's retired-pointer
// array has already been drained (asserted below).
121 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
123 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
124 assert( pNode->m_arrRetired.size() == 0 );
// NOTE(review): the actual `delete pNode` (orig. line ~125) is missing from
// this extraction.
// Acquires an HP record for the calling thread: first tries to claim a free
// (unowned) record from the global list via CAS on the owner id; if none is
// available, allocates a new record and pushes it onto the lock-free list head.
128 details::hp_record * GarbageCollector::alloc_hp_record()
130 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
// NOTE(review): the declaration of `hprec` (orig. lines 131-132) is missing
// from this extraction.
133 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
134 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
136 // First try to reuse a retired (non-active) HP record
137 for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
138 cds::OS::ThreadId thId = nullThreadId;
// CAS owner null -> current thread; on failure another thread owns this record.
139 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
// NOTE(review): orig. lines 140 and 142-144 (the `continue` on CAS failure and
// the `return hprec` on success) are missing from this extraction.
141 hprec->m_bFree.store( false, atomics::memory_order_release );
145 // No HP records available for reuse
146 // Allocate and push a new HP record
// NOTE(review): `hprec = NewHPRec();` (orig. line ~147) is missing here.
148 hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
149 hprec->m_bFree.store( false, atomics::memory_order_release );
// Lock-free push onto the list head (classic Treiber-style CAS loop; the `do`
// opening at orig. line ~152 is missing from this extraction).
151 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
153 // TSan: Next CAS release orders the memory
154 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(&hprec->m_pNextNode );
155 hprec->m_pNextNode = pOldHead;
156 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Releases the calling thread's HP record back to the global list by clearing
// its owner id; the record object itself is kept for reuse by other threads.
161 void GarbageCollector::free_hp_record( details::hp_record * pRec )
163 assert( pRec != nullptr );
164 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
// NOTE(review): orig. lines 165-168 are missing from this extraction —
// presumably a final Scan/HelpScan of the record's remaining retired pointers;
// confirm against the upstream file.
169 hplist_node * pNode = static_cast<hplist_node *>( pRec );
// Releasing the owner id publishes the record as reusable (paired with the
// acquire/CAS in alloc_hp_record).
170 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
// Walks the HP-record list and releases every record that still has an owner
// thread. Used during collector shutdown (see Destruct).
173 void GarbageCollector::detachAllThread()
175 hplist_node * pNext = nullptr;
176 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
177 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
178 pNext = hprec->m_pNextNode;
179 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
180 free_hp_record( hprec );
// Classic scan algorithm from Michael's Hazard Pointers paper:
// snapshot all published hazard pointers into a sorted array, then free every
// retired pointer of pRec that is not found in that snapshot; guarded pointers
// are re-pushed into the retired array to be retried on a later scan.
185 void GarbageCollector::classic_scan( details::hp_record * pRec )
187 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
189 std::vector< void * > plist;
// Reserve worst-case capacity up front so Stage 1 never reallocates.
190 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
191 assert( plist.size() == 0 );
193 // Stage 1: Scan HP list and insert non-null values in plist
195 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
// NOTE(review): the loop opening over pNode (orig. lines 196-197) is missing
// from this extraction.
198 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
200 void * hptr = pNode->m_hzp[i].get();
// NOTE(review): the null check on hptr (orig. lines 199/201) is missing here.
202 plist.push_back( hptr );
204 pNode = pNode->m_pNextNode;
207 // Sort plist to enable the binary search in Stage 2
208 std::sort( plist.begin(), plist.end());
210 // Stage 2: Search plist
211 details::retired_vector& arrRetired = pRec->m_arrRetired;
213 details::retired_vector::iterator itRetired = arrRetired.begin();
214 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
215 // arrRetired is not a std::vector!
216 // clear() just sets the item counter to 0; the items are not destroyed
// NOTE(review): the arrRetired.clear() call (orig. lines 217-219) is missing
// from this extraction; the push below re-inserts still-guarded items into the
// emptied array.
220 std::vector< void * >::iterator itBegin = plist.begin();
221 std::vector< void * >::iterator itEnd = plist.end();
222 size_t nDeferredCount = 0;
223 while ( itRetired != itRetiredEnd ) {
// Guarded pointer: keep it for a later scan.
224 if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
225 arrRetired.push( *itRetired );
// NOTE(review): orig. lines 226-231 — incrementing nDeferredCount, the else
// branch freeing the unguarded pointer, and the iterator advance — are missing
// from this extraction.
232 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
233 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
// In-place scan: avoids the auxiliary plist of classic_scan by sorting the
// retired array itself and marking guarded entries via the pointer's LSB,
// then compacting marked (deferred) entries to the front of the array.
237 void GarbageCollector::inplace_scan( details::hp_record * pRec )
239 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
241 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
242 // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
243 // If it is wrong, we use classic scan algorithm
245 // Check if all retired pointers has zero LSB
246 // LSB is used for marking pointers that cannot be deleted yet
247 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
248 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
249 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
// NOTE(review): the LSB test on it->m_p (orig. line 250) and the `return`
// after classic_scan (orig. lines 253-255) are missing from this extraction.
251 // found a pointer with LSB bit set - use classic_scan
252 classic_scan( pRec );
257 // Sort retired pointer array
258 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
// Debug-only duplicate detection on the sorted array (the surrounding
// CDS_DEBUG_ONLY / iterator setup, orig. lines 259-264, is missing here).
265 while ( ++it != itRetiredEnd ) {
// Sorted order puts duplicates adjacent, so equal neighbors mean the same
// pointer was retired twice.
266 if ( it->m_p == itPrev->m_p )
267 throw std::runtime_error( "Double free" );
273 // Search guarded pointers in retired array
274 hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
277 details::retired_ptr dummyRetired;
// NOTE(review): the `while ( pNode )` loop opening (orig. line ~278) is
// missing from this extraction.
279 if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
280 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
282 void * hptr = pNode->m_hzp[i].get();
// NOTE(review): the null check on hptr (orig. lines 281/283) is missing here.
284 dummyRetired.m_p = hptr;
// Binary search the sorted retired array for this hazard pointer.
285 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
286 if ( it != itRetiredEnd && it->m_p == hptr ) {
287 // Mark retired pointer as guarded
// NOTE(review): the LSB-setting statement (orig. lines 288-292) is missing
// from this extraction.
293 pNode = pNode->m_pNextNode;
297 // Move all marked pointers to head of array
299 auto itInsert = itRetired;
300 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
// NOTE(review): the mark test / unmark logic (orig. lines 301-302, 304-307)
// and the free of unguarded pointers (orig. lines 309-311) are missing from
// this extraction.
303 if ( itInsert != it )
308 // Retired pointer may be freed
// Shrink the retired array to the compacted (deferred) prefix.
312 const size_t nDeferred = itInsert - itRetired;
313 pRec->m_arrRetired.size( nDeferred );
314 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
315 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
// HelpScan (from Michael's paper): the calling thread adopts HP records
// abandoned by dead/detached threads, moves their retired pointers into its
// own record (pThis), and marks the adopted records free for reuse.
319 void GarbageCollector::HelpScan( details::hp_record * pThis )
321 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
// pThis must belong to the calling thread.
323 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id());
325 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
326 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
327 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
329 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
330 if ( hprec->m_bFree.load(atomics::memory_order_acquire))
// NOTE(review): the `continue` statements paired with the guards above/below
// (orig. lines 331-332, 339-341, 344-347) are missing from this extraction.
333 // Owns hprec if it is empty.
334 // Several threads may work concurrently so we use atomic technique only.
336 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
// Only adopt records whose owner is gone (null id or dead thread).
337 if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
338 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
// Second CAS attempt expecting a null owner (curOwner was clobbered by the
// failed CAS above).
342 curOwner = nullThreadId;
343 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
348 // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
349 // If it has ones then we move to pThis that is private for current thread.
350 details::retired_vector& src = hprec->m_arrRetired;
351 details::retired_vector& dest = pThis->m_arrRetired;
352 assert( !dest.isFull());
353 details::retired_vector::iterator itRetired = src.begin();
355 // TSan can issue a warning here:
356 // read src.m_nSize in src.end()
357 // write src.m_nSize in src.clear()
358 // This is false positive since we own hprec
359 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
360 details::retired_vector::iterator itRetiredEnd = src.end();
361 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
363 while ( itRetired != itRetiredEnd ) {
364 dest.push( *itRetired );
// If our own record fills up, run a scan to make room before continuing.
365 if ( dest.isFull()) {
366 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
// NOTE(review): the Scan call and iterator advance (orig. lines 367-371) and
// the src.clear() inside the TSan-ignore region (orig. line 374) are missing
// from this extraction.
372 // TSan: write src.m_nSize, see a comment above
373 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
375 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
// Release the adopted record back to the free pool.
377 hprec->m_bFree.store(true, atomics::memory_order_release);
378 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
// Fills `stat` with a point-in-time snapshot of the collector's configuration,
// HP-record list totals, and (if enabled) event counters. Walks the list with
// relaxed/acquire loads only, so counts are approximate under concurrency.
384 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
// Static configuration.
386 stat.nHPCount = m_nHazardPointerCount;
387 stat.nMaxThreadCount = m_nMaxThreadCount;
388 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
389 stat.nHPRecSize = sizeof( hplist_node )
390 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
// Reset accumulators before walking the list (the chained assignment spans
// missing orig. lines 393/396).
392 stat.nHPRecAllocated =
394 stat.nTotalRetiredPtrCount =
395 stat.nRetiredPtrInFreeHPRecs = 0;
397 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
398 ++stat.nHPRecAllocated;
399 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
401 if ( hprec->m_bFree.load(atomics::memory_order_relaxed)) {
403 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
// Event counters — meaningful only when statistics are enabled (the guard at
// orig. lines ~404-411 is missing from this extraction).
412 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
413 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
414 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
415 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
417 stat.evcScanCall = m_Stat.m_ScanCallCount;
418 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
419 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
421 stat.evcDeletedNode = m_Stat.m_DeletedNode;
422 stat.evcDeferredNode = m_Stat.m_DeferredNode;
429 }} // namespace cds::gc