7081cc011f657ac12c2d0e2077431adaef00b508
[libcds.git] / src / hp_gc.cpp
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /*
32     File: hp_gc.cpp
33
34     Hazard Pointers memory reclamation strategy implementation
35
36     Editions:
37         2008.02.10    Maxim.Khiszinsky    Created
38 */
39
40 #include <cds/gc/details/hp.h>
41
42 #include <algorithm>    // std::sort
43 #include "hp_const.h"
44
45 #define    CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }
46
47 namespace cds { namespace gc {
48     namespace hp {
49
        /// Max array size of retired pointers:
        /// two retired slots for every hazard pointer the whole process can hold
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;

        /// The one-and-only GarbageCollector instance; created by Construct(), destroyed by Destruct()
        GarbageCollector *    GarbageCollector::m_pHZPManager = nullptr;
54
55         void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
56         {
57             if ( !m_pHZPManager ) {
58                 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
59             }
60         }
61
62         void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
63         {
64             if ( m_pHZPManager ) {
65                 if ( bDetachAll )
66                     m_pHZPManager->detachAllThread();
67
68                 delete m_pHZPManager;
69                 m_pHZPManager = nullptr;
70             }
71         }
72
        /// Constructs the Hazard Pointer GC manager.
        ///
        /// @param nHazardPtrCount     hazard pointers per thread; 0 selects the default c_nHazardPointerPerThread
        /// @param nMaxThreadCount     maximum attached thread count; 0 selects the default c_nMaxThreadCount
        /// @param nMaxRetiredPtrCount capacity of each per-thread retired-pointer array;
        ///                            raised to at least c_nMaxRetireNodeCount if smaller
        /// @param nScanType           which scan algorithm Scan() dispatches to (classic or inplace)
        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,
            size_t nMaxThreadCount,
            size_t nMaxRetiredPtrCount,
            scan_type nScanType
        )
            : m_pListHead( nullptr )
            ,m_bStatEnabled( false )
            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
            ,m_nScanType( nScanType )
        {}
86
        /// Destroys the GC manager: reclaims all pending retired pointers and deletes every HP record.
        ///
        /// Expected to run effectively single-threaded at shutdown; the debug assert below
        /// checks that each record is unowned, owned by the destroying (main) thread, or
        /// owned by a thread that is no longer alive.
        GarbageCollector::~GarbageCollector()
        {
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)

            // Detach the whole record list first so it is walked exactly once
            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
                );
                // Free every retired pointer still queued in this record
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    itRetired->free();
                    ++itRetired;
                }
                // clear() resets the element counter; DeleteHPRec asserts size() == 0
                vect.clear();
                pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }
114
        /// Allocates a brand-new HP record owned by \p owner and counts the allocation in the statistics
        inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec( OS::ThreadId owner )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
            return new hplist_node( *this, owner );
        }
120
        /// Physically deletes HP record \p pNode; its retired-pointer array must already be empty
        inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
            assert( pNode->m_arrRetired.size() == 0 );
            delete pNode;
        }
127
        /// Attaches the calling thread to the GC and returns its HP record.
        ///
        /// First tries to reuse a retired record from the global list by CAS-ing the
        /// owner id from null to the current thread id; if no record can be claimed,
        /// a new one is allocated and pushed onto the list head with a CAS loop.
        /// Records are never unlinked from the list here.
        details::hp_record * GarbageCollector::alloc_hp_record()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::get_current_thread_id();

            // First try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( atomics::memory_order_relaxed ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
                cds::OS::ThreadId thId = nullThreadId;
                // acquire on success pairs with the release store in free_hp_record()
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                    continue;
                hprec->m_bFree.store( false, atomics::memory_order_relaxed );
                return hprec;
            }

            // No HP records available for reuse
            // Allocate and push a new HP record
            hprec = NewHPRec( curThreadId );

            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_relaxed );
            do {
                // link the new record in front of the current head, then try to publish it
                hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));

            return hprec;
        }
156
        /// Detaches the calling thread from its HP record \p pRec.
        ///
        /// Clears all hazard pointers, runs Scan() to reclaim what it can, helps
        /// scan abandoned records via HelpScan(), then releases ownership so the
        /// record becomes reusable by alloc_hp_record().
        void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )

            pRec->clear();
            Scan( pRec );
            HelpScan( pRec );
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            // release pairs with the acquire CAS in alloc_hp_record()
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }
168
        /// Detaches every HP record that still has an owner; called from Destruct( true ).
        ///
        /// NOTE(review): free_hp_record() ends up in HelpScan(), which asserts that the
        /// record is owned by the calling thread; this routine therefore presumes the
        /// original owning threads have already terminated — confirm at the call site.
        void GarbageCollector::detachAllThread()
        {
            hplist_node * pNext = nullptr;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = pNext ) {
                pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
                if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
                    free_hp_record( hprec );
                }
            }
        }
180
        /// Classic two-stage scan algorithm.
        ///
        /// Stage 1 gathers every non-null hazard pointer of every thread into \p plist;
        /// stage 2 frees each retired pointer of \p pRec that is not present in \p plist
        /// and re-queues the still-guarded ones for a later scan.
        void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: Scan HP list and insert non-null values in plist

            hplist_node * pNode = m_pListHead.load(atomics::memory_order_relaxed);

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    pRec->sync();
                    void * hptr = pNode->m_hzp[i].get();
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
            }

            // Sort plist to allow binary search in stage 2
            std::sort( plist.begin(), plist.end());

            // Stage 2: Search plist
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() only resets the item counter to 0; the items are not destroyed,
            // so the [itRetired, itRetiredEnd) range below stays valid
            arrRetired.clear();

            {
                std::vector< void * >::iterator itBegin = plist.begin();
                std::vector< void * >::iterator itEnd = plist.end();
                size_t nDeferredCount = 0;
                while ( itRetired != itRetiredEnd ) {
                    if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
                        // still guarded by some hazard pointer - defer to the next scan
                        arrRetired.push( *itRetired );
                        ++nDeferredCount;
                    }
                    else
                        itRetired->free();
                    ++itRetired;
                }
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
            }
        }
232
        /// In-place scan algorithm: marks guarded retired pointers instead of building a helper list.
        ///
        /// Sorts the retired array, marks each entry that matches a live hazard pointer by
        /// setting the LSB of its \p m_n field, then frees the unmarked entries and compacts
        /// the marked ones to the front of the array.
        void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
            // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
            // If it is wrong, we use classic scan algorithm

            // Check if all retired pointers have zero LSB
            // LSB is used for marking pointers that cannot be deleted yet
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                if ( it->m_n & 1 ) {
                    // found a pointer with LSB bit set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort retired pointer array to enable lower_bound lookups below
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Check double free
            /*
            {
                auto it = itRetired;
                auto itPrev = it;
                while ( ++it != itRetiredEnd ) {
                    if ( it->m_p == itPrev->m_p )
                        throw std::runtime_error( "Double free" );
                    itPrev = it;
                }
            }
            */

            // Search guarded pointers in retired array
            hplist_node * pNode = m_pListHead.load( atomics::memory_order_relaxed );

            {
                details::retired_ptr dummyRetired;
                while ( pNode ) {
                    // records flagged m_bFree hold no live hazard pointers - skip them
                    if ( !pNode->m_bFree.load( atomics::memory_order_relaxed )) {
                        for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                            pRec->sync();
                            void * hptr = pNode->m_hzp[i].get();
                            if ( hptr ) {
                                dummyRetired.m_p = hptr;
                                details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                                if ( it != itRetiredEnd && it->m_p == hptr ) {
                                    // Mark retired pointer as guarded
                                    it->m_n |= 1;
                                }
                            }
                        }
                    }
                    pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
                }
            }

            // Move all marked pointers to head of array
            {
                auto itInsert = itRetired;
                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                    if ( it->m_n & 1 ) {
                        // guarded: clear the mark and compact to the front
                        it->m_n &= ~1;
                        if ( itInsert != it )
                            *itInsert = *it;
                        ++itInsert;
                    }
                    else {
                        // Retired pointer may be freed
                        it->free();
                    }
                }
                const size_t nDeferred = itInsert - itRetired;
                pRec->m_arrRetired.size( nDeferred );
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
            }
        }
314
        /// "Help Scan": adopts retired pointers abandoned by detached or dead threads.
        ///
        /// Walks the HP record list; every non-free record whose owner id is null or
        /// belongs to a dead thread is claimed by CAS, its retired pointers are moved
        /// into \p pThis (running Scan() whenever the destination fills up), and the
        /// record is then marked free for reuse.
        ///
        /// Precondition (asserted): \p pThis is owned by the calling thread.
        void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )

            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id());

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {

                // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
                if ( hprec->m_bFree.load(atomics::memory_order_relaxed))
                    continue;

                // Owns hprec if it is empty.
                // Several threads may work concurrently so we use atomic technique only.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_relaxed);
                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
                        // unowned or owned by a dead thread - try to claim it; another
                        // helper may race us, in which case the CAS fails and we move on
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
                            continue;
                    }
                    else
                        continue;
                    //else {
                    //    curOwner = nullThreadId;
                    //    if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                    //        continue;
                    //}
                }

                // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
                // If it has ones then we move to pThis that is private for current thread.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();

                // TSan can issue a warning here:
                //  read src.m_nSize in src.end()
                //  write src.m_nSize in src.clear()
                // This is false positive since we own hprec
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                details::retired_vector::iterator itRetiredEnd = src.end();
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;

                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        // make room in the private array before adopting more pointers
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
                        Scan( pThis );
                    }
                    ++itRetired;
                }

                // TSan: write src.m_nSize, see a comment above
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                src.clear();
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;

                hprec->m_bFree.store(true, atomics::memory_order_relaxed);
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );

                Scan( pThis );
            }
        }
381
382         GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
383         {
384             stat.nHPCount                = m_nHazardPointerCount;
385             stat.nMaxThreadCount         = m_nMaxThreadCount;
386             stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
387             stat.nHPRecSize              = sizeof( hplist_node )
388                                             + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
389
390             stat.nHPRecAllocated         =
391                 stat.nHPRecUsed              =
392                 stat.nTotalRetiredPtrCount   =
393                 stat.nRetiredPtrInFreeHPRecs = 0;
394
395             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
396                 ++stat.nHPRecAllocated;
397                 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
398
399                 if ( hprec->m_bFree.load(atomics::memory_order_relaxed)) {
400                     // Free HP record
401                     stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
402                 }
403                 else {
404                     // Used HP record
405                     ++stat.nHPRecUsed;
406                 }
407             }
408
409             // Events
410             stat.evcAllocHPRec   = m_Stat.m_AllocHPRec;
411             stat.evcRetireHPRec  = m_Stat.m_RetireHPRec;
412             stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
413             stat.evcDeleteHPRec  = m_Stat.m_DeleteHPRec;
414
415             stat.evcScanCall     = m_Stat.m_ScanCallCount;
416             stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
417             stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
418
419             stat.evcDeletedNode  = m_Stat.m_DeletedNode;
420             stat.evcDeferredNode = m_Stat.m_DeferredNode;
421
422             return stat;
423         }
424
425
426     } //namespace hp
427 }} // namespace cds::gc