rearrange cds/gc contents
[libcds.git] / src / hzp_gc.cpp
1 //$$CDS-header$$
2
3 /*
4     File: hzp_gc.cpp
5
6     Hazard Pointers memory reclamation strategy implementation
7
8     Editions:
9         2008.02.10    Maxim.Khiszinsky    Created
10 */
11
12 #include <cds/gc/hp/hp.h>
13
14 #include <algorithm>    // std::sort
15 #include "hzp_const.h"
16
// Increments a statistics event counter only when statistics gathering is enabled
// (expands inside GarbageCollector member functions; reads m_bStatEnabled)
#define    CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }
18
19 namespace cds { namespace gc {
20     namespace hzp {
21
        /// Max array size of retired pointers
        /// (lower bound for the retired-vector capacity chosen in the GarbageCollector ctor)
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;

        // Singleton instance; created by Construct(), destroyed by Destruct()
        GarbageCollector *    GarbageCollector::m_pHZPManager = nullptr;
26
27         void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
28         {
29             if ( !m_pHZPManager ) {
30                 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
31             }
32         }
33
34         void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
35         {
36             if ( m_pHZPManager ) {
37                 if ( bDetachAll )
38                     m_pHZPManager->detachAllThread();
39
40                 delete m_pHZPManager;
41                 m_pHZPManager = nullptr;
42             }
43         }
44
        /// Constructs the Hazard Pointer garbage collector.
        /// Zero-valued counts fall back to the compile-time defaults; the retired-pointer
        /// capacity is clamped from below by c_nMaxRetireNodeCount (i.e. max of the two).
        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,      // hazard pointers per thread; 0 => c_nHazardPointerPerThread
            size_t nMaxThreadCount,      // max number of attached threads; 0 => c_nMaxThreadCount
            size_t nMaxRetiredPtrCount,  // per-thread retired-list capacity (see clamp below)
            scan_type nScanType          // selects classic_scan or inplace_scan
        )
            : m_pListHead( nullptr )
            ,m_bStatEnabled( true )      // statistics gathering is on by default
            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
            ,m_nScanType( nScanType )
        {}
58
        /// Destroys the GC: frees every pending retired pointer and every HP record.
        /// Assumes no concurrent activity - each record must be unowned, owned by the
        /// calling thread, or owned by a thread that is no longer alive (see assert).
        GarbageCollector::~GarbageCollector()
        {
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)

            // Detach the whole HP record list from the collector
            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
                );
                // Free every retired pointer still pending in this record
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    DeletePtr( *itRetired );
                    ++itRetired;
                }
                vect.clear();   // reset item counter; DeleteHPRec asserts size() == 0
                pNext = hprec->m_pNextNode;     // save the link before the node is destroyed
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }
86
87         inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
88         {
89             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
90             return new hplist_node( *this );
91         }
92
93         inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
94         {
95             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
96             assert( pNode->m_arrRetired.size() == 0 );
97             delete pNode;
98         }
99
100         inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
101         {
102             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
103             p.free();
104         }
105
        /// Allocates a hazard-pointer record for the calling thread.
        /// First tries to take ownership of a free record already in the global list;
        /// if none is available, allocates a new record and pushes it onto the
        /// lock-free list head. Records are never unlinked while the GC is alive.
        details::HPRec * GarbageCollector::AllocateHPRec()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();

            // First try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                cds::OS::ThreadId thId = nullThreadId;
                // CAS from "no owner" to the current thread claims the record atomically;
                // on failure some other thread owns it - move on to the next record
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                    continue;
                hprec->m_bFree.store( false, atomics::memory_order_release );
                return hprec;
            }

            // No HP records available for reuse
            // Allocate and push a new HP record
            hprec = NewHPRec();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
            hprec->m_bFree.store( false, atomics::memory_order_relaxed );

            // Publish the initialized fields before linking the record into the list
            atomics::atomic_thread_fence( atomics::memory_order_release );

            // Lock-free push onto the list head
            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
            do {
                hprec->m_pNextNode = pOldHead;
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));

            return hprec;
        }
138
        /// Returns the calling thread's HP record to the shared pool.
        /// All hazard pointers of the record are cleared, its retired list is scanned
        /// (freeing whatever is unguarded), and ownership is released so another
        /// thread may claim the record via AllocateHPRec() or HelpScan().
        void GarbageCollector::RetireHPRec( details::HPRec * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );

            pRec->clear();      // drop all hazard pointers held by this record
            Scan( pRec );       // free what can be freed right now
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            // Release ownership last: makes the record visible for reuse
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }
149
        /// Retires every HP record that still has an owner (used at GC shutdown).
        /// NOTE(review): ownership is tested with a relaxed load and no CAS -
        /// presumably called when no other thread is attaching/detaching; confirm.
        void GarbageCollector::detachAllThread()
        {
            hplist_node * pNext = nullptr;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
                pNext = hprec->m_pNextNode;     // save link before RetireHPRec touches the record
                if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
                    RetireHPRec( hprec );   // scans the record's retired list and releases ownership
                }
            }
        }
161
        /// Classic two-stage scan (M. Michael's algorithm): collect all currently
        /// guarded pointers into a sorted list, then free every retired pointer of
        /// \p pRec that is not in that list; guarded ones are kept for a later scan.
        void GarbageCollector::classic_scan( details::HPRec * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: Scan HP list and insert non-null values in plist

            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    void * hptr = pNode->m_hzp[i];
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode;
            }

            // Sort plist to enable binary search below
            std::sort( plist.begin(), plist.end() );

            // Stage 2: Search plist
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() only resets the item counter to 0 - the items are not destroyed,
            // so the iterators above remain valid and the vector is re-filled below.
            arrRetired.clear();

            std::vector< void * >::iterator itBegin = plist.begin();
            std::vector< void * >::iterator itEnd = plist.end();
            while ( itRetired != itRetiredEnd ) {
                if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
                    // Still guarded by some thread - defer to a later scan
                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
                    arrRetired.push( *itRetired );
                }
                else
                    DeletePtr( *itRetired );    // unguarded - safe to free now
                ++itRetired;
            }
        }
207
        /// In-place scan: marks guarded retired pointers directly in \p pRec's retired
        /// array using the pointer's LSB as a flag, frees the unmarked entries, and
        /// compacts the survivors to the head of the array.
        void GarbageCollector::inplace_scan( details::HPRec * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );

            // The in-place scan algorithm uses the LSB of a retired ptr as a mark for internal purposes.
            // This is correct only if all retired pointers are at least 2-byte aligned (LSB is zero).
            // If that does not hold, fall back to the classic scan algorithm.

            // Check that all retired pointers have a zero LSB
            // (the LSB is used below to mark pointers that cannot be deleted yet)
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
                if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
                    // found a pointer with the LSB already set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort the retired pointer array to enable binary search (lower_bound)
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Search for guarded pointers in the retired array

            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    void * hptr = pNode->m_hzp[i];
                    if ( hptr ) {
                        details::retired_ptr    dummyRetired;
                        dummyRetired.m_p = hptr;
                        details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                        if ( it != itRetiredEnd && it->m_p == hptr )  {
                            // Mark the retired pointer as guarded (set the LSB)
                            it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) | 1);
                        }
                    }
                }
                pNode = pNode->m_pNextNode;
            }

            // Move all marked (still guarded) pointers to the head of the array,
            // free the rest, then shrink the array to the number of survivors
            details::retired_vector::iterator itInsert = itRetired;
            for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
                if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
                    // Guarded: strip the mark and keep the entry for a later scan
                    it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) & ~1);
                    *itInsert = *it;
                    ++itInsert;
                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
                }
                else {
                    // Not guarded - the retired pointer may be freed
                    DeletePtr( *it );
                }
            }
            pRec->m_arrRetired.size( itInsert - itRetired );
        }
267
        /// "Help scan": adopts retired pointers from free/abandoned (dead-owner)
        /// HP records into \p pThis (private to the calling thread), scanning
        /// whenever the destination fills up, then releases each adopted record.
        void GarbageCollector::HelpScan( details::HPRec * pThis )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );

            // pThis must be owned by the calling thread
            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {

                // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
                    continue;

                // Take ownership of hprec if it is unowned or its owner is dead.
                // Several threads may work concurrently so we use atomic technique only.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
                    if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                    else {
                        // Owner looked alive: retry the CAS expecting "no owner" - it succeeds
                        // only if the owner released the record between the load and the CAS.
                        // NOTE(review): the canonical algorithm simply skips here; confirm intended.
                        curOwner = nullThreadId;
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                }

                // We own the record now. If it has retired pointers,
                // move them to pThis which is private for the current thread.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();
                details::retired_vector::iterator itRetiredEnd = src.end();
                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        // Destination is full - scan it to make room before continuing
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
                        Scan( pThis );
                    }
                    ++itRetired;
                }
                src.clear();

                // Release the adopted record: mark it free first, then drop ownership
                hprec->m_bFree.store(true, atomics::memory_order_release);
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
            }
        }
318
319         GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
320         {
321             stat.nHPCount                = m_nHazardPointerCount;
322             stat.nMaxThreadCount         = m_nMaxThreadCount;
323             stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
324             stat.nHPRecSize              = sizeof( hplist_node )
325                                             + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
326
327             stat.nHPRecAllocated         =
328                 stat.nHPRecUsed              =
329                 stat.nTotalRetiredPtrCount   =
330                 stat.nRetiredPtrInFreeHPRecs = 0;
331
332             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
333                 ++stat.nHPRecAllocated;
334                 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
335
336                 if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
337                     // Free HP record
338                     stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
339                 }
340                 else {
341                     // Used HP record
342                     ++stat.nHPRecUsed;
343                 }
344             }
345
346             // Events
347             stat.evcAllocHPRec   = m_Stat.m_AllocHPRec;
348             stat.evcRetireHPRec  = m_Stat.m_RetireHPRec;
349             stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
350             stat.evcDeleteHPRec  = m_Stat.m_DeleteHPRec;
351
352             stat.evcScanCall     = m_Stat.m_ScanCallCount;
353             stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
354             stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
355
356             stat.evcDeletedNode  = m_Stat.m_DeletedNode;
357             stat.evcDeferredNode = m_Stat.m_DeferredNode;
358
359             return stat;
360         }
361
362
363     } //namespace hzp
364 }} // namespace cds::gc