//$$CDS-header$$

/*
    File: hp_gc.cpp

    Hazard Pointers memory reclamation strategy implementation

    Editions:
        2008.02.10    Maxim.Khiszinsky    Created
*/

#include <cds/gc/details/hp.h>

#include <algorithm>    // std::sort
#include "hp_const.h"

#define    CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }

namespace cds { namespace gc {
    namespace hp {

        /// Max array size of retired pointers
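        /// (Twice the H*N bound, for H hazard pointers per thread across N threads:
        /// at most H*N pointers can be guarded at any moment, so a scan of a
        /// full retired array always frees at least half of it.)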
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;

        GarbageCollector *    GarbageCollector::m_pHZPManager = nullptr;

        void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
        {
            if ( !m_pHZPManager ) {
                m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
            }
        }

        void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
        {
            if ( m_pHZPManager ) {
                if ( bDetachAll )
                    m_pHZPManager->detachAllThread();

                delete m_pHZPManager;
                m_pHZPManager = nullptr;
            }
        }
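
        // Lifecycle sketch (illustration only; applications normally use the
        // cds::gc::HP facade rather than calling the singleton directly):
        //      GarbageCollector::Construct();       // once, at startup
        //      ... attach threads, use Hazard Pointer based containers ...
        //      GarbageCollector::Destruct( true );  // once, at shutdown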

        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,
            size_t nMaxThreadCount,
            size_t nMaxRetiredPtrCount,
            scan_type nScanType
        )
            : m_pListHead( nullptr )
            ,m_bStatEnabled( false )
            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
            ,m_nScanType( nScanType )
        {}

        GarbageCollector::~GarbageCollector()
        {
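            // By this point every user thread must have been detached: each
            // remaining record is expected to be unowned, owned by the main
            // thread, or owned by a thread that is no longer alive (asserted below)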
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)

            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
                );
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    itRetired->free();
                    ++itRetired;
                }
                vect.clear();
                pNext = hprec->m_pNextNode;
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }

        inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
            return new hplist_node( *this );
        }

        inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
            assert( pNode->m_arrRetired.size() == 0 );
            delete pNode;
        }

        details::hp_record * GarbageCollector::alloc_hp_record()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::get_current_thread_id();

            // First try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                cds::OS::ThreadId thId = nullThreadId;
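                // The CAS claims the record only if it is currently unowned
                // (m_idOwner == nullThreadId); on failure another thread owns it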
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                    continue;
                hprec->m_bFree.store( false, atomics::memory_order_release );
                return hprec;
            }

            // No HP records available for reuse
            // Allocate and push a new HP record
            hprec = NewHPRec();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
            hprec->m_bFree.store( false, atomics::memory_order_release );

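            // Lock-free push-front: on failure compare_exchange_weak reloads
            // pOldHead, so the loop body just relinks the new node and retries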
            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
            do {
                hprec->m_pNextNode = pOldHead;
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));

            return hprec;
        }

        void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )

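            // Order matters here: clear this thread's hazard pointers, scan its
            // own retired list, help scan abandoned records, and only then mark
            // the record unowned so another thread may claim it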
            pRec->clear();
            Scan( pRec );
            HelpScan( pRec );
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }

        void GarbageCollector::detachAllThread()
        {
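            // Called from Destruct( true ): force-release every record that is
            // still owned, scanning and handing off its retired pointers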
            hplist_node * pNext = nullptr;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
                pNext = hprec->m_pNextNode;
                if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
                    free_hp_record( hprec );
                }
            }
        }

        void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
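            // Classic two-stage scan (Michael's algorithm): collect every
            // non-null hazard pointer into plist, then free each retired
            // pointer that no thread currently guards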
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: Scan HP list and insert non-null values in plist

            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    pRec->sync();
                    void * hptr = pNode->m_hzp[i];
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode;
            }

            // Sort plist so that stage 2 can use binary search
            std::sort( plist.begin(), plist.end() );

            // Stage 2: Search plist
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() just resets the item counter to 0; the items are not destroyed
            arrRetired.clear();

            {
                std::vector< void * >::iterator itBegin = plist.begin();
                std::vector< void * >::iterator itEnd = plist.end();
                size_t nDeferredCount = 0;
                while ( itRetired != itRetiredEnd ) {
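                    // A retired pointer found in plist is still guarded by some
                    // thread: defer it until the next scan. Otherwise no thread
                    // can reach it and it is freed immediately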
                    if ( std::binary_search( itBegin, itEnd, itRetired->m_p ) ) {
                        arrRetired.push( *itRetired );
                        ++nDeferredCount;
                    }
                    else
                        itRetired->free();
                    ++itRetired;
                }
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
            }
        }

        void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            // The in-place scan algorithm uses the LSB of each retired pointer
            // as a mark for internal purposes. This is correct only if every
            // retired pointer is at least 2-byte aligned (its LSB is zero);
            // otherwise we fall back to the classic scan algorithm.

            // Check that every retired pointer has a zero LSB;
            // the LSB marks pointers that cannot be deleted yet
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                if ( it->m_n & 1 ) {
                    // found a pointer with the LSB set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort the retired pointer array so lower_bound can search it below
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Check double free
            /*
            {
                auto it = itRetired;
                auto itPrev = it;
                while ( ++it != itRetiredEnd ) {
                    if ( it->m_p == itPrev->m_p )
                        throw std::runtime_error( "Double free" );
                    itPrev = it;
                }
            }
            */

            // Search for guarded pointers in the retired array
            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            {
                details::retired_ptr dummyRetired;
                while ( pNode ) {
                    if ( !pNode->m_bFree.load( atomics::memory_order_acquire ) ) {
                        for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                            pRec->sync();
                            void * hptr = pNode->m_hzp[i];
                            if ( hptr ) {
                                dummyRetired.m_p = hptr;
                                details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                                if ( it != itRetiredEnd && it->m_p == hptr ) {
                                    // Mark the retired pointer as guarded
                                    it->m_n |= 1;
                                }
                            }
                        }
                    }
                    pNode = pNode->m_pNextNode;
                }
            }

            // Move all marked pointers to the head of the array
            {
                auto itInsert = itRetired;
                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                    if ( it->m_n & 1 ) {
                        it->m_n &= ~1;
                        if ( itInsert != it )
                            *itInsert = *it;
                        ++itInsert;
                    }
                    else {
                        // The retired pointer is not guarded and may be freed
                        it->free();
                    }
                }
                const size_t nDeferred = itInsert - itRetired;
                pRec->m_arrRetired.size( nDeferred );
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
            }
        }

        void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )

            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id() );

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {

                // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to examine it
                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
                    continue;

                // Try to acquire ownership of hprec if it is unowned.
                // Several threads may contend for it concurrently, so only atomic operations are used.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                    else {
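                        // The owner looked alive when we read it but may have
                        // released the record since; retry the CAS expecting an
                        // unowned record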
                        curOwner = nullThreadId;
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                }

                // We have acquired hprec successfully. Now check whether its hp_record
                // holds retired pointers; if so, move them to pThis, which is private to the current thread.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();

                // TSan can issue a warning here:
                //  read src.m_nSize in src.end()
                //  write src.m_nSize in src.clear()
                // This is a false positive since we own hprec
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                details::retired_vector::iterator itRetiredEnd = src.end();
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;

                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
                        Scan( pThis );
                    }
                    ++itRetired;
                }

                // TSan: write src.m_nSize, see the comment above
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                src.clear();
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;

                hprec->m_bFree.store(true, atomics::memory_order_release);
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );

                Scan( pThis );
            }
        }

        GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
        {
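            // Snapshot of configuration, per-record usage and event counters.
            // The list walk is not synchronized with concurrent threads, so the
            // figures are approximate by design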
            stat.nHPCount                = m_nHazardPointerCount;
            stat.nMaxThreadCount         = m_nMaxThreadCount;
            stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
            stat.nHPRecSize              = sizeof( hplist_node )
                                            + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;

            stat.nHPRecAllocated         =
                stat.nHPRecUsed              =
                stat.nTotalRetiredPtrCount   =
                stat.nRetiredPtrInFreeHPRecs = 0;

            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
                ++stat.nHPRecAllocated;
                stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();

                if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
                    // Free HP record
                    stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
                }
                else {
                    // Used HP record
                    ++stat.nHPRecUsed;
                }
            }

            // Events
            stat.evcAllocHPRec       = m_Stat.m_AllocHPRec;
            stat.evcRetireHPRec      = m_Stat.m_RetireHPRec;
            stat.evcAllocNewHPRec    = m_Stat.m_AllocNewHPRec;
            stat.evcDeleteHPRec      = m_Stat.m_DeleteHPRec;

            stat.evcScanCall         = m_Stat.m_ScanCallCount;
            stat.evcHelpScanCall     = m_Stat.m_HelpScanCallCount;
            stat.evcScanFromHelpScan = m_Stat.m_CallScanFromHelpScan;

            stat.evcDeletedNode      = m_Stat.m_DeletedNode;
            stat.evcDeferredNode     = m_Stat.m_DeferredNode;

            return stat;
        }


    } //namespace hp
}} // namespace cds::gc