/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
    File: hp_gc.cpp

    Hazard Pointers memory reclamation strategy implementation

    Editions:
        2008.02.10    Maxim.Khiszinsky    Created
*/

#include <cds/gc/details/hp.h>

#include <algorithm>    // std::sort
#include "hp_const.h"

#define CDS_HAZARDPTR_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }

namespace cds { namespace gc {
    namespace hp {

        /// Max array size of retired pointers
        static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;

        GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;

        void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
        {
            if ( !m_pHZPManager ) {
                m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
            }
        }

        void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
        {
            if ( m_pHZPManager ) {
                if ( bDetachAll )
                    m_pHZPManager->detachAllThread();

                delete m_pHZPManager;
                m_pHZPManager = nullptr;
            }
        }
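
        // A minimal lifecycle sketch (illustrative comment only; assumes the header declares
        // defaulted arguments, as suggested by the zero-means-default handling in the ctor):
        //
        //      cds::gc::hp::GarbageCollector::Construct();        // once, before any HP use
        //      // ... threads attach, use hazard pointers, detach ...
        //      cds::gc::hp::GarbageCollector::Destruct( true );   // at exit; true detaches all threads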

        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,
            size_t nMaxThreadCount,
            size_t nMaxRetiredPtrCount,
            scan_type nScanType
        )
            : m_pListHead( nullptr )
            , m_bStatEnabled( false )
            , m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
            , m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
            , m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
            , m_nScanType( nScanType )
        {}

        GarbageCollector::~GarbageCollector()
        {
            CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
            CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )

            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
            m_pListHead.store( nullptr, atomics::memory_order_relaxed );

            hplist_node * pNext = nullptr;
            for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                    || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
                );
                details::retired_vector& vect = hprec->m_arrRetired;
                details::retired_vector::iterator itRetired = vect.begin();
                details::retired_vector::iterator itRetiredEnd = vect.end();
                while ( itRetired != itRetiredEnd ) {
                    itRetired->free();
                    ++itRetired;
                }
                vect.clear();
                pNext = hprec->m_pNextNode;
                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                DeleteHPRec( hprec );
            }
        }

        inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
            return new hplist_node( *this );
        }

        inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
            assert( pNode->m_arrRetired.size() == 0 );
            delete pNode;
        }

        details::hp_record * GarbageCollector::alloc_hp_record()
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )

            hplist_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId  = cds::OS::get_current_thread_id();

            // First, try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                cds::OS::ThreadId thId = nullThreadId;
                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
                    continue;
                hprec->m_bFree.store( false, atomics::memory_order_release );
                return hprec;
            }

            // No HP record is available for reuse:
            // allocate a new record and push it onto the list
            hprec = NewHPRec();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
            hprec->m_bFree.store( false, atomics::memory_order_release );

            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
            do {
                // TSan: the release CAS below orders this store
                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &hprec->m_pNextNode );
                hprec->m_pNextNode = pOldHead;
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));

            return hprec;
        }
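
        // Note on the list structure: HP records are pushed with a lock-free (Treiber-stack
        // style) CAS loop and are never unlinked while the collector is alive; a released
        // record is only marked free and its owner reset, so another thread can reuse it.
        // Records are physically deleted only in the GarbageCollector destructor.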

        void GarbageCollector::free_hp_record( details::hp_record * pRec )
        {
            assert( pRec != nullptr );
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )

            pRec->clear();
            Scan( pRec );
            HelpScan( pRec );
            hplist_node * pNode = static_cast<hplist_node *>( pRec );
            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
        }

        void GarbageCollector::detachAllThread()
        {
            hplist_node * pNext = nullptr;
            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            for ( hplist_node * hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = pNext ) {
                pNext = hprec->m_pNextNode;
                if ( hprec->m_idOwner.load( atomics::memory_order_relaxed ) != nullThreadId ) {
                    free_hp_record( hprec );
                }
            }
        }
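
        // detachAllThread() is called at shutdown (see Destruct( true )): any HP record still
        // owned by a thread is released via free_hp_record(), which drains its retired list
        // through Scan()/HelpScan() before the owner id is reset.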

        void GarbageCollector::classic_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            std::vector< void * >   plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 1: scan the HP list and collect all non-null hazard pointers into plist

            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            while ( pNode ) {
                for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                    pRec->sync();
                    void * hptr = pNode->m_hzp[i].get();
                    if ( hptr )
                        plist.push_back( hptr );
                }
                pNode = pNode->m_pNextNode;
            }

            // Sort plist to allow binary search
            std::sort( plist.begin(), plist.end());

            // Stage 2: search plist for each retired pointer
            details::retired_vector& arrRetired = pRec->m_arrRetired;

            details::retired_vector::iterator itRetired     = arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = arrRetired.end();
            // arrRetired is not a std::vector!
            // clear() just resets the item counter to 0; the items are not destroyed
            arrRetired.clear();

            {
                std::vector< void * >::iterator itBegin = plist.begin();
                std::vector< void * >::iterator itEnd = plist.end();
                size_t nDeferredCount = 0;
                while ( itRetired != itRetiredEnd ) {
                    if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
                        // The pointer is still guarded - defer its deletion
                        arrRetired.push( *itRetired );
                        ++nDeferredCount;
                    }
                    else
                        itRetired->free();
                    ++itRetired;
                }
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
            }
        }
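
        // Rough cost of classic_scan: with H = m_nMaxThreadCount * m_nHazardPointerCount
        // hazard slots and R retired pointers, Stage 1 plus the sort is O(H log H) and
        // Stage 2 is O(R log H), as in Michael's original Scan algorithm.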

        void GarbageCollector::inplace_scan( details::hp_record * pRec )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )

            // The in-place scan algorithm uses the LSB of each retired pointer as a mark
            // for internal purposes. This is correct only if every retired pointer is at
            // least 2-byte aligned (so its LSB is zero); otherwise we fall back to the
            // classic scan algorithm.

            // Check that all retired pointers have a zero LSB.
            // The LSB is used for marking pointers that cannot be deleted yet.
            details::retired_vector::iterator itRetired     = pRec->m_arrRetired.begin();
            details::retired_vector::iterator itRetiredEnd  = pRec->m_arrRetired.end();
            for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                if ( it->m_n & 1 ) {
                    // Found a pointer with the LSB set - use classic_scan
                    classic_scan( pRec );
                    return;
                }
            }

            // Sort the retired pointer array
            std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );

            // Check for double free
            /*
            {
                auto it = itRetired;
                auto itPrev = it;
                while ( ++it != itRetiredEnd ) {
                    if ( it->m_p == itPrev->m_p )
                        throw std::runtime_error( "Double free" );
                    itPrev = it;
                }
            }
            */

            // Search for guarded pointers in the retired array
            hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );

            {
                details::retired_ptr dummyRetired;
                while ( pNode ) {
                    if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
                        for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                            pRec->sync();
                            void * hptr = pNode->m_hzp[i].get();
                            if ( hptr ) {
                                dummyRetired.m_p = hptr;
                                details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
                                if ( it != itRetiredEnd && it->m_p == hptr ) {
                                    // Mark the retired pointer as guarded
                                    it->m_n |= 1;
                                }
                            }
                        }
                    }
                    pNode = pNode->m_pNextNode;
                }
            }

            // Move all marked (still guarded) pointers to the head of the array
            {
                auto itInsert = itRetired;
                for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
                    if ( it->m_n & 1 ) {
                        it->m_n &= ~1;
                        if ( itInsert != it )
                            *itInsert = *it;
                        ++itInsert;
                    }
                    else {
                        // The retired pointer is not guarded - it may be freed
                        it->free();
                    }
                }
                const size_t nDeferred = itInsert - itRetired;
                pRec->m_arrRetired.size( nDeferred );
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
                CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
            }
        }
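
        // Unlike classic_scan, inplace_scan needs no auxiliary plist allocation: it sorts the
        // retired array itself and marks guarded entries via the pointer LSB, which is exactly
        // why it requires every retired pointer to be at least 2-byte aligned (checked above).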

        void GarbageCollector::HelpScan( details::hp_record * pThis )
        {
            CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )

            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());

            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
            const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
            for ( hplist_node * hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {

                // If m_bFree == true then hprec->m_arrRetired is empty - no need to examine it
                if ( hprec->m_bFree.load( atomics::memory_order_acquire ))
                    continue;

                // Take ownership of hprec if it is not in use.
                // Several threads may be working concurrently, so we rely on atomic operations only.
                {
                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_acquire );
                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                    else {
                        curOwner = nullThreadId;
                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            continue;
                    }
                }

                // We now own hprec. Check whether its hp_record has retired pointers;
                // if it has any, move them to pThis, which is private to the current thread.
                details::retired_vector& src = hprec->m_arrRetired;
                details::retired_vector& dest = pThis->m_arrRetired;
                assert( !dest.isFull());
                details::retired_vector::iterator itRetired = src.begin();

                // TSan may issue a warning here:
                //   read of src.m_nSize in src.end()
                //   write of src.m_nSize in src.clear()
                // This is a false positive since we own hprec
                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                details::retired_vector::iterator itRetiredEnd = src.end();
                CDS_TSAN_ANNOTATE_IGNORE_READS_END;

                while ( itRetired != itRetiredEnd ) {
                    dest.push( *itRetired );
                    if ( dest.isFull()) {
                        CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
                        Scan( pThis );
                    }
                    ++itRetired;
                }

                // TSan: write of src.m_nSize, see the comment above
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                src.clear();
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;

                hprec->m_bFree.store( true, atomics::memory_order_release );
                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );

                Scan( pThis );
            }
        }
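
        // HelpScan implements the "helping" part of the Hazard Pointer scheme: it adopts the
        // retired lists of HP records abandoned by dead or detached threads, so their retired
        // nodes are eventually reclaimed even though the owning thread is gone.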

        GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat ) const
        {
            stat.nHPCount                = m_nHazardPointerCount;
            stat.nMaxThreadCount         = m_nMaxThreadCount;
            stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
            stat.nHPRecSize              = sizeof( hplist_node )
                                            + sizeof( details::retired_ptr ) * m_nMaxRetiredPtrCount;

            stat.nHPRecAllocated         =
                stat.nHPRecUsed              =
                stat.nTotalRetiredPtrCount   =
                stat.nRetiredPtrInFreeHPRecs = 0;

            for ( hplist_node * hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                ++stat.nHPRecAllocated;
                stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();

                if ( hprec->m_bFree.load( atomics::memory_order_relaxed )) {
                    // Free HP record
                    stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
                }
                else {
                    // Used HP record
                    ++stat.nHPRecUsed;
                }
            }

            // Events
            stat.evcAllocHPRec       = m_Stat.m_AllocHPRec;
            stat.evcRetireHPRec      = m_Stat.m_RetireHPRec;
            stat.evcAllocNewHPRec    = m_Stat.m_AllocNewHPRec;
            stat.evcDeleteHPRec      = m_Stat.m_DeleteHPRec;

            stat.evcScanCall         = m_Stat.m_ScanCallCount;
            stat.evcHelpScanCall     = m_Stat.m_HelpScanCallCount;
            stat.evcScanFromHelpScan = m_Stat.m_CallScanFromHelpScan;

            stat.evcDeletedNode      = m_Stat.m_DeletedNode;
            stat.evcDeferredNode     = m_Stat.m_DeferredNode;

            return stat;
        }


    } //namespace hp
}} // namespace cds::gc