Removed redundant spaces
[libcds.git] / src / hp_gc.cpp
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /*
32     File: hzp_gc.cpp
33
34     Hazard Pointers memory reclamation strategy implementation
35
36     Editions:
37         2008.02.10    Maxim.Khiszinsky    Created
38 */
39
40 #include <cds/gc/details/hp.h>
41
42 #include <algorithm>    // std::sort
43 #include "hp_const.h"
44
45 #define    CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }
46
47 namespace cds { namespace gc {
48     namespace hp {
49
        /// Upper bound of retired pointers the GC may hold at one time:
        /// two full per-thread retire arrays for every potential thread
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;

        /// The GC singleton; created by Construct(), destroyed by Destruct()
        GarbageCollector *    GarbageCollector::m_pHZPManager = nullptr;
54
55         void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
56         {
57             if ( !m_pHZPManager ) {
58                 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
59             }
60         }
61
62         void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
63         {
64             if ( m_pHZPManager ) {
65                 if ( bDetachAll )
66                     m_pHZPManager->detachAllThread();
67
68                 delete m_pHZPManager;
69                 m_pHZPManager = nullptr;
70             }
71         }
72
        /// Constructs the Hazard Pointer garbage collector.
        /// Zero-valued size arguments fall back to the compile-time defaults;
        /// the retired-pointer capacity is never allowed below c_nMaxRetireNodeCount.
        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,     // hazard pointers per thread; 0 => c_nHazardPointerPerThread
            size_t nMaxThreadCount,     // max number of threads; 0 => c_nMaxThreadCount
            size_t nMaxRetiredPtrCount, // retired-pointer capacity; raised to c_nMaxRetireNodeCount minimum
            scan_type nScanType         // which Scan algorithm to use (classic or inplace)
        )
            : m_pListHead( nullptr )
            ,m_bStatEnabled( false )
            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
            ,m_nScanType( nScanType )
        {}
86
        GarbageCollector::~GarbageCollector()
        {
            // Destructor runs single-threaded; every other thread is expected to be
            // detached or dead already - the assert below checks that invariant.
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)

            // Detach the whole HP record list, then walk and destroy it
            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                // A record may only belong to nobody, to this thread,
                // or to a thread that has already terminated
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
                );
                // Free every retired pointer still pending in this record
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    itRetired->free();
                    ++itRetired;
                }
                vect.clear();   // reset the retired array before deleting the record
                pNext = hprec->m_pNextNode;
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }
114
115         inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
116         {
117             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
118             return new hplist_node( *this );
119         }
120
121         inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
122         {
123             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
124             assert( pNode->m_arrRetired.size() == 0 );
125             delete pNode;
126         }
127
        /// Attaches an HP record to the calling thread.
        /// First tries to take over a free record from the global list;
        /// otherwise allocates a new record and pushes it onto the list head (lock-free).
        details::hp_record * GarbageCollector::alloc_hp_record()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::get_current_thread_id();

            // First try to reuse a retired (non-active) HP record:
            // CAS the owner id from "no owner" to the current thread id
            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                cds::OS::ThreadId thId = nullThreadId;
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
                    continue;   // record is owned by another thread - keep searching
                hprec->m_bFree.store( false, atomics::memory_order_release );
                return hprec;
            }

            // No HP records available for reuse
            // Allocate and push a new HP record
            hprec = NewHPRec();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
            hprec->m_bFree.store( false, atomics::memory_order_release );

            // Lock-free push onto the head of the global record list
            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
            do {
                hprec->m_pNextNode = pOldHead;
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));

            return hprec;
        }
158
        /// Detaches the HP record from the calling thread:
        /// clears its hazard pointers, reclaims what can be reclaimed,
        /// and marks the record as ownerless so another thread may reuse it.
        void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )

            pRec->clear();      // reset hazard pointers so they no longer guard anything
            Scan( pRec );       // reclaim this record's unguarded retired pointers
            HelpScan( pRec );   // also adopt retired pointers from dead/abandoned records
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            // Publish the record as free-for-reuse
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }
170
171         void GarbageCollector::detachAllThread()
172         {
173             hplist_node * pNext = nullptr;
174             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
175             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
176                 pNext = hprec->m_pNextNode;
177                 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
178                     free_hp_record( hprec );
179                 }
180             }
181         }
182
        /// Classic scan algorithm (M. Michael's Hazard Pointers paper):
        /// collect all currently-guarded pointers into a sorted side array,
        /// then free every retired pointer of pRec that is not in that array.
        void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: Scan HP list and insert non-null values in plist

            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    pRec->sync();
                    void * hptr = pNode->m_hzp[i].get();
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode;
            }

            // Sort plist to allow binary search in stage 2
            std::sort( plist.begin(), plist.end());

            // Stage 2: for each retired pointer, search plist;
            // guarded pointers are deferred, unguarded ones are freed
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() is just set up item counter to 0, the items is not destroyed
            arrRetired.clear();

            {
                std::vector< void * >::iterator itBegin = plist.begin();
                std::vector< void * >::iterator itEnd = plist.end();
                size_t nDeferredCount = 0;
                while ( itRetired != itRetiredEnd ) {
                    // A guarded pointer cannot be freed yet - push it back
                    // into the (just cleared) retired array for a later scan
                    if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
                        arrRetired.push( *itRetired );
                        ++nDeferredCount;
                    }
                    else
                        itRetired->free();  // not guarded - reclaim now
                    ++itRetired;
                }
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
            }
        }
234
        /// In-place scan algorithm: marks guarded retired pointers directly in the
        /// retired array (using the LSB of the count field as a mark bit) instead
        /// of building a separate array of hazard pointers as classic_scan does.
        void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
            // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
            // If it is wrong, we use classic scan algorithm

            // Check that all retired pointers have zero LSB
            // LSB is used for marking pointers that cannot be deleted yet
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                if ( it->m_n & 1 ) {
                    // found a pointer with LSB bit set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort retired pointer array so guarded pointers can be found
            // with binary search (std::lower_bound) below
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Check double free (debug aid, disabled)
            /*
            {
                auto it = itRetired;
                auto itPrev = it;
                while ( ++it != itRetiredEnd ) {
                    if ( it->m_p == itPrev->m_p )
                        throw std::runtime_error( "Double free" );
                    itPrev = it;
                }
            }
            */

            // Search guarded pointers in retired array
            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            {
                details::retired_ptr dummyRetired;
                while ( pNode ) {
                    // Records marked free have no active hazard pointers - skip them
                    if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
                        for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                            pRec->sync();
                            void * hptr = pNode->m_hzp[i].get();
                            if ( hptr ) {
                                dummyRetired.m_p = hptr;
                                details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                                if ( it != itRetiredEnd && it->m_p == hptr ) {
                                    // Mark retired pointer as guarded
                                    it->m_n |= 1;
                                }
                            }
                        }
                    }
                    pNode = pNode->m_pNextNode;
                }
            }

            // Compact: move all marked (still guarded) pointers to the head
            // of the array and free everything else
            {
                auto itInsert = itRetired;
                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                    if ( it->m_n & 1 ) {
                        it->m_n &= ~1;      // clear the mark bit before keeping the entry
                        if ( itInsert != it )
                            *itInsert = *it;
                        ++itInsert;
                    }
                    else {
                        // Retired pointer is not guarded - may be freed now
                        it->free();
                    }
                }
                const size_t nDeferred = itInsert - itRetired;
                pRec->m_arrRetired.size( nDeferred );   // shrink the array to the deferred entries
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
            }
        }
316
        /// "Help scan" algorithm: adopts the retired pointers of HP records whose
        /// owner thread is dead or detached, moving them into pThis (the caller's
        /// own record) and scanning them there, so abandoned records cannot leak.
        void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )

            // pThis must belong to the calling thread
            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id());

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {

                // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
                if ( hprec->m_bFree.load(atomics::memory_order_acquire))
                    continue;

                // Owns hprec if it is empty.
                // Several threads may work concurrently so we use atomic technique only.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
                        // Record is ownerless or its owner is dead: CAS the owner id to us
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;   // another thread took it first
                    }
                    else {
                        // Owner looked alive; retry the CAS expecting "no owner" in case
                        // the owner released the record between the load and here
                        curOwner = nullThreadId;
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;   // still owned - skip this record
                    }
                }

                // We own the record now. Move its retired pointers into pThis,
                // which is private to the current thread, scanning whenever the
                // destination array fills up.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();

                // TSan can issue a warning here:
                //  read src.m_nSize in src.end()
                //  write src.m_nSize in src.clear()
                // This is false positive since we own hprec
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                details::retired_vector::iterator itRetiredEnd = src.end();
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;

                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        // Destination full: scan to free unguarded pointers before continuing
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
                        Scan( pThis );
                    }
                    ++itRetired;
                }

                // TSan: write src.m_nSize, see a comment above
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                src.clear();
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;

                // Release the adopted record: empty and ownerless again
                hprec->m_bFree.store(true, atomics::memory_order_release);
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );

                Scan( pThis );
            }
        }
381
382         GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
383         {
384             stat.nHPCount                = m_nHazardPointerCount;
385             stat.nMaxThreadCount         = m_nMaxThreadCount;
386             stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
387             stat.nHPRecSize              = sizeof( hplist_node )
388                                             + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
389
390             stat.nHPRecAllocated         =
391                 stat.nHPRecUsed              =
392                 stat.nTotalRetiredPtrCount   =
393                 stat.nRetiredPtrInFreeHPRecs = 0;
394
395             for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
396                 ++stat.nHPRecAllocated;
397                 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
398
399                 if ( hprec->m_bFree.load(atomics::memory_order_relaxed)) {
400                     // Free HP record
401                     stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
402                 }
403                 else {
404                     // Used HP record
405                     ++stat.nHPRecUsed;
406                 }
407             }
408
409             // Events
410             stat.evcAllocHPRec   = m_Stat.m_AllocHPRec;
411             stat.evcRetireHPRec  = m_Stat.m_RetireHPRec;
412             stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
413             stat.evcDeleteHPRec  = m_Stat.m_DeleteHPRec;
414
415             stat.evcScanCall     = m_Stat.m_ScanCallCount;
416             stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
417             stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
418
419             stat.evcDeletedNode  = m_Stat.m_DeletedNode;
420             stat.evcDeferredNode = m_Stat.m_DeferredNode;
421
422             return stat;
423         }
424
425
426     } //namespace hp
427 }} // namespace cds::gc