Remove ugly reinterpret_cast from HP GC
[libcds.git] / src / hp_gc.cpp
1 //$$CDS-header$$
2
3 /*
4     File: hzp_gc.cpp
5
6     Hazard Pointers memory reclamation strategy implementation
7
8     Editions:
9         2008.02.10    Maxim.Khiszinsky    Created
10 */
11
12 #include <cds/gc/details/hp.h>
13
14 #include <algorithm>    // std::sort
15 #include "hp_const.h"
16
17 #define    CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }
18
19 namespace cds { namespace gc {
20     namespace hp {
21
        /// Max array size of retired pointers
        /// (two full generations of hazard pointers across all threads)
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;

        /// Singleton GC instance; created by Construct(), destroyed by Destruct()
        GarbageCollector *    GarbageCollector::m_pHZPManager = nullptr;
26
27         void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
28         {
29             if ( !m_pHZPManager ) {
30                 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
31             }
32         }
33
34         void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
35         {
36             if ( m_pHZPManager ) {
37                 if ( bDetachAll )
38                     m_pHZPManager->detachAllThread();
39
40                 delete m_pHZPManager;
41                 m_pHZPManager = nullptr;
42             }
43         }
44
45         GarbageCollector::GarbageCollector(
46             size_t nHazardPtrCount,
47             size_t nMaxThreadCount,
48             size_t nMaxRetiredPtrCount,
49             scan_type nScanType
50         )
51             : m_pListHead( nullptr )
52             ,m_bStatEnabled( true )
53             ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
54             ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
55             ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
56             ,m_nScanType( nScanType )
57         {}
58
        /// Destructor: frees every HP record and all retired pointers still pending.
        /// Runs single-threaded; in debug builds it asserts that each record is
        /// unowned, owned by the current thread, or owned by a dead thread.
        GarbageCollector::~GarbageCollector()
        {
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)

            // Detach the whole record list before walking it
            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
                );
                // Free every retired pointer still waiting in this record
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    DeletePtr( *itRetired );
                    ++itRetired;
                }
                // clear() before DeleteHPRec - its assert requires an empty retired vector
                vect.clear();
                pNext = hprec->m_pNextNode;
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }
86
87         inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
88         {
89             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
90             return new hplist_node( *this );
91         }
92
93         inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
94         {
95             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
96             assert( pNode->m_arrRetired.size() == 0 );
97             delete pNode;
98         }
99
100         inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
101         {
102             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
103             p.free();
104         }
105
        /// Acquires an HP record for the calling thread.
        /// Lock-free: first tries to steal an unowned record from the shared list
        /// via CAS on m_idOwner; otherwise allocates a new record and pushes it
        /// onto the list head with a CAS loop.
        details::hp_record * GarbageCollector::alloc_hp_record()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();

            // First try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                cds::OS::ThreadId thId = nullThreadId;
                // CAS succeeds only when the record is unowned (owner == null thread id)
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                    continue;
                hprec->m_bFree.store( false, atomics::memory_order_release );
                return hprec;
            }

            // No HP records available for reuse
            // Allocate and push a new HP record
            hprec = NewHPRec();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
            hprec->m_bFree.store( false, atomics::memory_order_relaxed );

            // Publish the relaxed stores above before linking the record into the shared list
            atomics::atomic_thread_fence( atomics::memory_order_release );

            // Lock-free list push: retry until the new node becomes the list head
            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
            do {
                hprec->m_pNextNode = pOldHead;
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));

            return hprec;
        }
138
        /// Returns the caller's HP record to the shared pool for reuse.
        /// The record is not deleted; it stays linked in the list.
        void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );

            pRec->clear();      // drop the hazard pointers so they no longer guard anything
            Scan( pRec );       // try to reclaim the record's remaining retired pointers
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            // Release ownership last so another thread sees a fully cleaned record
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }
149
150         void GarbageCollector::detachAllThread()
151         {
152             hplist_node * pNext = nullptr;
153             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
154             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
155                 pNext = hprec->m_pNextNode;
156                 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
157                     free_hp_record( hprec );
158                 }
159             }
160         }
161
        /// Classic two-stage scan (Michael's hazard-pointer algorithm):
        /// Stage 1 collects every currently guarded pointer into a sorted list;
        /// Stage 2 frees each retired pointer of pRec not found in that list,
        /// deferring the guarded ones to the next scan.
        void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: Scan HP list and insert non-null values in plist

            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    void * hptr = pNode->m_hzp[i];
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode;
            }

            // Sort plist so the binary search below is valid
            std::sort( plist.begin(), plist.end() );

            // Stage 2: Search plist
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() just resets the item counter to 0; the items are not destroyed,
            // so [itRetired, itRetiredEnd) stays valid while the vector is re-filled below.
            arrRetired.clear();

            std::vector< void * >::iterator itBegin = plist.begin();
            std::vector< void * >::iterator itEnd = plist.end();
            while ( itRetired != itRetiredEnd ) {
                if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
                    // Still guarded by some thread - keep it for a later scan
                    CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
                    arrRetired.push( *itRetired );
                }
                else
                    DeletePtr( *itRetired );    // not guarded - reclaim now
                ++itRetired;
            }
        }
207
        /// In-place scan: avoids the temporary plist of classic_scan by sorting
        /// the retired array itself and marking guarded entries via the LSB of
        /// the retired pointer's m_n field.
        void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );

            // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
            // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
            // If it is wrong, we use classic scan algorithm

            // Check if all retired pointers have zero LSB
            // LSB is used for marking pointers that cannot be deleted yet
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                if ( it->m_n & 1 ) {
                    // found a pointer with LSB bit set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort retired pointer array so guarded pointers can be found by binary search
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Search guarded pointers in retired array
            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            {
                details::retired_ptr dummyRetired;
                while ( pNode ) {
                    for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                        void * hptr = pNode->m_hzp[i];
                        if ( hptr ) {
                            dummyRetired.m_p = hptr;
                            details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                            if ( it != itRetiredEnd && it->m_p == hptr ) {
                                // Mark retired pointer as guarded
                                it->m_n |= 1;
                            }
                        }
                    }
                    pNode = pNode->m_pNextNode;
                }
            }

            // Move all marked (guarded) pointers to head of array; free the rest
            {
                auto itInsert = itRetired;
                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                    if ( it->m_n & 1 ) {
                        it->m_n &= ~1;      // clear the mark before keeping the entry
                        if ( itInsert != it )
                            *itInsert = *it;
                        ++itInsert;
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
                    }
                    else {
                        // Retired pointer may be freed
                        DeletePtr( *it );
                    }
                }
                // Shrink the array to the surviving (still guarded) entries
                pRec->m_arrRetired.size( itInsert - itRetired );
            }
        }
271
        /// Helping scan: the current thread adopts HP records abandoned by dead
        /// (or detached) threads, moves their retired pointers into its own
        /// record pThis, and releases the adopted records back to the pool.
        void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );

            // pThis must be the calling thread's own record
            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {

                // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
                    continue;

                // Owns hprec if it is empty.
                // Several threads may work concurrently so we use atomic technique only.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
                    if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
                        // Claim the record from its current (null or dead) owner
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                    else {
                        // Owner looked alive; retry the claim assuming it has just released the record
                        curOwner = nullThreadId;
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                }

                // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
                // If it has ones then we move to pThis that is private for current thread.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();
                details::retired_vector::iterator itRetiredEnd = src.end();
                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        // NOTE(review): if Scan() defers everything, dest may still be
                        // full and the next push would overflow - confirm the capacity
                        // invariant guarantees Scan() always frees at least one slot.
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
                        Scan( pThis );
                    }
                    ++itRetired;
                }
                src.clear();

                // Mark empty and release ownership so the record can be reused
                hprec->m_bFree.store(true, atomics::memory_order_release);
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
            }
        }
322
323         GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
324         {
325             stat.nHPCount                = m_nHazardPointerCount;
326             stat.nMaxThreadCount         = m_nMaxThreadCount;
327             stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
328             stat.nHPRecSize              = sizeof( hplist_node )
329                                             + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
330
331             stat.nHPRecAllocated         =
332                 stat.nHPRecUsed              =
333                 stat.nTotalRetiredPtrCount   =
334                 stat.nRetiredPtrInFreeHPRecs = 0;
335
336             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
337                 ++stat.nHPRecAllocated;
338                 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
339
340                 if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
341                     // Free HP record
342                     stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
343                 }
344                 else {
345                     // Used HP record
346                     ++stat.nHPRecUsed;
347                 }
348             }
349
350             // Events
351             stat.evcAllocHPRec   = m_Stat.m_AllocHPRec;
352             stat.evcRetireHPRec  = m_Stat.m_RetireHPRec;
353             stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
354             stat.evcDeleteHPRec  = m_Stat.m_DeleteHPRec;
355
356             stat.evcScanCall     = m_Stat.m_ScanCallCount;
357             stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
358             stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
359
360             stat.evcDeletedNode  = m_Stat.m_DeletedNode;
361             stat.evcDeferredNode = m_Stat.m_DeferredNode;
362
363             return stat;
364         }
365
366
367     } //namespace hp
368 }} // namespace cds::gc