/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
    File: hp_gc.cpp

    Hazard Pointers memory reclamation strategy implementation

    Editions:
        2008.02.10    Maxim.Khiszinsky    Created
*/

#include <cds/gc/details/hp.h>

#include <algorithm>    // std::sort
#include "hp_const.h"

#define CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }

namespace cds { namespace gc {
    namespace hp {

        /// Max array size of retired pointers
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
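        // Sizing rationale: at most c_nHazardPointerPerThread * c_nMaxThreadCount
        // pointers can be guarded at any instant, so a retired array of twice that
        // size guarantees that each Scan() pass can free at least half of the array
        // (the amortized-O(1) reclamation bound of Michael's hazard pointer scheme).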

        GarbageCollector *    GarbageCollector::m_pHZPManager = nullptr;

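        // Creates the process-wide singleton. The check-then-create below is not
        // thread-safe: Construct() must be called once, before any worker thread
        // starts using the Hazard Pointer GC.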
        void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
        {
            if ( !m_pHZPManager ) {
                m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
            }
        }

        void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
        {
            if ( m_pHZPManager ) {
                if ( bDetachAll )
                    m_pHZPManager->detachAllThread();

                delete m_pHZPManager;
                m_pHZPManager = nullptr;
            }
        }
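
        // A minimal usage sketch (not part of this file). Applications normally do
        // not call Construct()/Destruct() directly; the cds::gc::HP wrapper invokes
        // them from its constructor/destructor:
        //
        //      #include <cds/init.h>
        //      #include <cds/gc/hp.h>
        //
        //      int main() {
        //          cds::Initialize();
        //          {
        //              cds::gc::HP hpGC;   // calls GarbageCollector::Construct()
        //              cds::threading::Manager::attachThread();
        //              // ... use HP-based containers ...
        //          }                       // calls GarbageCollector::Destruct()
        //          cds::Terminate();
        //      }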

        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,
            size_t nMaxThreadCount,
            size_t nMaxRetiredPtrCount,
            scan_type nScanType
        )
            : m_pListHead( nullptr )
            ,m_bStatEnabled( false )
            ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
            ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
            ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
            ,m_nScanType( nScanType )
        {}

        GarbageCollector::~GarbageCollector()
        {
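            // The destructor runs single-threaded: every record in the HP list must
            // be unowned, owned by the calling thread, or owned by an already
            // terminated thread (checked by the assert below). Any still-retired
            // pointers are freed unconditionally.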
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )

            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
                );
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    itRetired->free();
                    ++itRetired;
                }
                vect.clear();
                pNext = hprec->m_pNextNode;
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }

        inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
            return new hplist_node( *this );
        }

        inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
            assert( pNode->m_arrRetired.size() == 0 );
            delete pNode;
        }

        details::hp_record * GarbageCollector::alloc_hp_record()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::get_current_thread_id();

            // First try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                cds::OS::ThreadId thId = nullThreadId;
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
                    continue;
                hprec->m_bFree.store( false, atomics::memory_order_release );
                return hprec;
            }

            // No HP records available for reuse
            // Allocate and push a new HP record
            hprec = NewHPRec();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
            hprec->m_bFree.store( false, atomics::memory_order_release );

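            // Lock-free push of the new record onto the head of the HP list
            // (Treiber-stack style CAS loop). The TSan happens-before annotation
            // tells the race detector that the plain write to m_pNextNode is
            // published by the release CAS that follows it.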
            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
            do {
                hprec->m_pNextNode = pOldHead;
                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &( hprec->m_pNextNode ));
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));

            return hprec;
        }

        void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )

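            // Release protocol: drop all hazard pointers, reclaim whatever is no
            // longer guarded, adopt retired lists abandoned by dead threads, then
            // mark the record unowned so another thread can reuse it.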
            pRec->clear();
            Scan( pRec );
            HelpScan( pRec );
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }

        void GarbageCollector::detachAllThread()
        {
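            // Called from Destruct( true ): force-release every HP record that is
            // still attached to a thread, flushing its retired pointers through
            // free_hp_record().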
            hplist_node * pNext = nullptr;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            for ( hplist_node * hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = pNext ) {
                pNext = hprec->m_pNextNode;
                if ( hprec->m_idOwner.load( atomics::memory_order_relaxed ) != nullThreadId ) {
                    free_hp_record( hprec );
                }
            }
        }

        void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
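            // Classic scan (Michael's algorithm): snapshot every hazard pointer of
            // every thread into a sorted list, then walk our retired array, freeing
            // each pointer that is not currently guarded and deferring the rest.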
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: scan the HP list and insert non-null hazard pointers into plist

            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    pRec->sync();
                    void * hptr = pNode->m_hzp[i].get();
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode;
            }

            // Sort plist so that Stage 2 can use binary search
            std::sort( plist.begin(), plist.end());

            // Stage 2: search plist for each retired pointer
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() just resets the item counter to 0; the items are not destroyed
            arrRetired.clear();

            {
                std::vector< void * >::iterator itBegin = plist.begin();
                std::vector< void * >::iterator itEnd = plist.end();
                size_t nDeferredCount = 0;
                while ( itRetired != itRetiredEnd ) {
                    if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
                        arrRetired.push( *itRetired );
                        ++nDeferredCount;
                    }
                    else
                        itRetired->free();
                    ++itRetired;
                }
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
            }
        }

        void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            // The in-place scan algorithm uses the LSB of each retired pointer as a mark
            // for internal purposes. This is correct only if every retired pointer is at
            // least 2-byte aligned (LSB is zero); otherwise we fall back to the classic
            // scan algorithm.

            // Check that all retired pointers have a zero LSB;
            // the LSB is used to mark pointers that cannot be deleted yet
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                if ( it->m_n & 1 ) {
                    // found a pointer with the LSB set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort the retired pointer array
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Check double free
            /*
            {
                auto it = itRetired;
                auto itPrev = it;
                while ( ++it != itRetiredEnd ) {
                    if ( it->m_p == itPrev->m_p )
                        throw std::runtime_error( "Double free" );
                    itPrev = it;
                }
            }
            */

            // Search for guarded pointers in the retired array
            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            {
                details::retired_ptr dummyRetired;
                while ( pNode ) {
                    if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
                        for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                            pRec->sync();
                            void * hptr = pNode->m_hzp[i].get();
                            if ( hptr ) {
                                dummyRetired.m_p = hptr;
                                details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                                if ( it != itRetiredEnd && it->m_p == hptr ) {
                                    // Mark the retired pointer as guarded
                                    it->m_n |= 1;
                                }
                            }
                        }
                    }
                    pNode = pNode->m_pNextNode;
                }
            }

            // Move all marked (guarded) pointers to the head of the array
            {
                auto itInsert = itRetired;
                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                    if ( it->m_n & 1 ) {
                        it->m_n &= ~1;
                        if ( itInsert != it )
                            *itInsert = *it;
                        ++itInsert;
                    }
                    else {
                        // The retired pointer is not guarded - it may be freed
                        it->free();
                    }
                }
                const size_t nDeferred = itInsert - itRetired;
                pRec->m_arrRetired.size( nDeferred );
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
            }
        }

        void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
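            // HelpScan: walk the HP list looking for records whose owner thread has
            // exited or detached, claim each such record with a CAS on m_idOwner,
            // move its retired pointers into pThis (private to the current thread),
            // and Scan(). This keeps retired pointers of dead threads from leaking.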
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )

            assert( static_cast<hplist_node *>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
            for ( hplist_node * hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {

                // If m_bFree == true, hprec->m_arrRetired is empty - there is nothing to adopt
                if ( hprec->m_bFree.load( atomics::memory_order_acquire ))
                    continue;

                // Take ownership of hprec if it is not currently owned.
                // Several threads may attempt this concurrently, so ownership
                // is claimed only via an atomic CAS.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_acquire );
                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                    else {
                        curOwner = nullThreadId;
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                }

                // The record is now owned by us. Check whether it holds retired
                // pointers; if so, move them to pThis, which is private to the
                // current thread.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();

                // TSan can issue a warning here:
                //   read of src.m_nSize in src.end()
                //   write of src.m_nSize in src.clear()
                // This is a false positive since we own hprec
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                details::retired_vector::iterator itRetiredEnd = src.end();
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;

                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
                        Scan( pThis );
                    }
                    ++itRetired;
                }

                // TSan: write of src.m_nSize, see the comment above
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                src.clear();
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;

                hprec->m_bFree.store( true, atomics::memory_order_release );
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );

                Scan( pThis );
            }
        }

        GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat ) const
        {
            stat.nHPCount                = m_nHazardPointerCount;
            stat.nMaxThreadCount         = m_nMaxThreadCount;
            stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
            stat.nHPRecSize              = sizeof( hplist_node )
                                            + sizeof( details::retired_ptr ) * m_nMaxRetiredPtrCount;

            stat.nHPRecAllocated         =
                stat.nHPRecUsed              =
                stat.nTotalRetiredPtrCount   =
                stat.nRetiredPtrInFreeHPRecs = 0;

            for ( hplist_node * hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                ++stat.nHPRecAllocated;
                stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();

                if ( hprec->m_bFree.load( atomics::memory_order_relaxed )) {
                    // Free HP record
                    stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
                }
                else {
                    // Used HP record
                    ++stat.nHPRecUsed;
                }
            }

            // Events
            stat.evcAllocHPRec    = m_Stat.m_AllocHPRec;
            stat.evcRetireHPRec   = m_Stat.m_RetireHPRec;
            stat.evcAllocNewHPRec = m_Stat.m_AllocNewHPRec;
            stat.evcDeleteHPRec   = m_Stat.m_DeleteHPRec;

            stat.evcScanCall      = m_Stat.m_ScanCallCount;
            stat.evcHelpScanCall  = m_Stat.m_HelpScanCallCount;
            stat.evcScanFromHelpScan = m_Stat.m_CallScanFromHelpScan;

            stat.evcDeletedNode   = m_Stat.m_DeletedNode;
            stat.evcDeferredNode  = m_Stat.m_DeferredNode;

            return stat;
        }


    } //namespace hp
}} // namespace cds::gc