//$$CDS-header$$

/*
    File: hrc_gc.cpp

    Implementation of cds::gc::hrc::GarbageCollector

    Editions:
        2008.03.10    Maxim.Khiszinsky    Created
*/

#include <cds/gc/hrc/hrc.h>

#include "hzp_const.h"
#include <vector>
#include <algorithm>    // std::sort

#define CDS_HRC_STATISTIC( _x )    if ( m_bStatEnabled ) { _x; }

namespace cds { namespace gc {
    namespace hrc {

        GarbageCollector * GarbageCollector::m_pGC = null_ptr<GarbageCollector *>();

        GarbageCollector::GarbageCollector(
            size_t nHazardPtrCount,
            size_t nMaxThreadCount,
            size_t nRetiredNodeArraySize
            )
            : m_pListHead( null_ptr<thread_list_node *>()),
            m_bStatEnabled( true ),
            m_nHazardPointerCount( nHazardPtrCount ),
            m_nMaxThreadCount( nMaxThreadCount ),
            m_nMaxRetiredPtrCount( nRetiredNodeArraySize )
        {}

        GarbageCollector::~GarbageCollector()
        {
            thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
            while ( pNode ) {
                assert( pNode->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::nullThreadId() );
                clearHRCThreadDesc( pNode );
                thread_list_node * pNext = pNode->m_pNext;
                deleteHRCThreadDesc( pNode );
                pNode = pNext;
            }
        }

        void CDS_STDCALL GarbageCollector::Construct(
            size_t nHazardPtrCount,     // number of hazard pointers per thread
            size_t nMaxThreadCount,     // max number of threads
            size_t nMaxNodeLinkCount,   // max number of links (HRC pointers) in a container item
            size_t nMaxTransientLinks   // max number of container items that may transiently point to an item being deleted
            )
        {
            if ( !m_pGC ) {
                if ( nHazardPtrCount == 0 )
                    nHazardPtrCount = c_nHazardPointerPerThread + c_nCleanUpHazardPointerPerThread;
                if ( nMaxThreadCount == 0 )
                    nMaxThreadCount = c_nMaxThreadCount;
                if ( nMaxNodeLinkCount == 0 )
                    nMaxNodeLinkCount = c_nHRCMaxNodeLinkCount;
                if ( nMaxTransientLinks == 0 )
                    nMaxTransientLinks = c_nHRCMaxTransientLinks;

                size_t nRetiredNodeArraySize = nMaxThreadCount * ( nHazardPtrCount + nMaxNodeLinkCount + nMaxTransientLinks + 1 );

                m_pGC = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nRetiredNodeArraySize );
            }
        }

        void CDS_STDCALL GarbageCollector::Destruct()
        {
            if ( m_pGC ) {
                {
                    ThreadGC tgc;
                    tgc.init();
                    m_pGC->HelpScan( &tgc );
                    m_pGC->Scan( &tgc );
                    // tgc dtor calls fini()
                }

                delete m_pGC;
                m_pGC = null_ptr<GarbageCollector *>();
            }
        }
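
        /*
            Illustrative sketch (not part of the library): the typical life
            cycle of the collector as Construct()/Destruct() imply it.
            Construct() is called once at startup (zeros select the defaults
            computed above), each thread that touches HRC-based containers
            attaches a ThreadGC, and Destruct() is called once at exit.
        */
#if 0
        static void example_life_cycle()
        {
            GarbageCollector::Construct( 0, 0, 0, 0 );  // zeros select the defaults

            {
                ThreadGC tgc;
                tgc.init();     // attach this thread to the collector
                // ... work with HRC-based containers ...
            }                   // ThreadGC dtor calls fini()

            GarbageCollector::Destruct();
        }
#endif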

        inline GarbageCollector::thread_list_node * GarbageCollector::newHRCThreadDesc()
        {
            CDS_HRC_STATISTIC( ++m_Stat.m_AllocNewHRCThreadDesc );
            return new thread_list_node( *this );
        }

        inline void GarbageCollector::deleteHRCThreadDesc( thread_list_node * pNode )
        {
            assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
            CDS_HRC_STATISTIC( ++m_Stat.m_DeleteHRCThreadDesc );
            delete pNode;
        }

        void GarbageCollector::clearHRCThreadDesc( thread_list_node * pNode )
        {
            assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
            ContainerNode * pItem;
            for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) {
                if ( (pItem = pNode->m_arrRetired[n].m_pNode.load(CDS_ATOMIC::memory_order_relaxed)) != null_ptr<ContainerNode *>() ) {
                    pNode->m_arrRetired[n].m_funcFree( pItem );
                    pNode->m_arrRetired[n].m_pNode.store( null_ptr<ContainerNode *>(), CDS_ATOMIC::memory_order_relaxed );
                }
            }
            assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
        }

        GarbageCollector::thread_list_node * GarbageCollector::getHRCThreadDescForCurrentThread() const
        {
            thread_list_node * hprec;
            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();

            for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
                if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) {
                    assert( !hprec->m_bFree );
                    return hprec;
                }
            }
            return null_ptr<GarbageCollector::thread_list_node *>();
        }

        details::thread_descriptor * GarbageCollector::allocateHRCThreadDesc( ThreadGC * pThreadGC )
        {
            CDS_HRC_STATISTIC( ++m_Stat.m_AllocHRCThreadDesc );

            thread_list_node * hprec;
            const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();

            // First try to reuse a retired (non-active) HP record
            for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
                cds::OS::ThreadId expectedThreadId = nullThreadId;
                if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) )
                    continue;
                hprec->m_pOwner = pThreadGC;
                hprec->m_bFree = false;
                assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
                return hprec;
            }

            // No HP record is available for reuse: allocate a new one and push it onto the list
            hprec = newHRCThreadDesc();
            assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
            hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
            hprec->m_pOwner = pThreadGC;
            hprec->m_bFree = false;

            thread_list_node * pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
            do {
                hprec->m_pNext = pOldHead;
            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));

            assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
            return hprec;
        }
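
        /*
            Illustrative sketch (not part of the library): the lock-free
            prepend used above, shown in isolation with a hypothetical node
            type. The new node is linked first and then published with a
            release CAS, so a reader that loads the head with acquire
            semantics also sees m_pNext.
        */
#if 0
        struct example_node {
            example_node * m_pNext;
        };
        static void example_push( CDS_ATOMIC::atomic<example_node *>& head, example_node * pNew )
        {
            example_node * pOld = head.load( CDS_ATOMIC::memory_order_relaxed );
            do {
                pNew->m_pNext = pOld;   // re-linked on every CAS failure
            } while ( !head.compare_exchange_weak( pOld, pNew,
                CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
        }
#endif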

        void GarbageCollector::retireHRCThreadDesc( details::thread_descriptor * pRec )
        {
            CDS_HRC_STATISTIC( ++m_Stat.m_RetireHRCThreadDesc );

            pRec->clear();
            thread_list_node * pNode = static_cast<thread_list_node *>( pRec );
            assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
            /*
                Note: pNode->m_idOwner may differ from cds::OS::getCurrentThreadId()
                if the thread descriptor is destroyed after its owner thread
                has already terminated.
            */
            assert( pNode->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != cds::OS::nullThreadId() );
            pNode->m_pOwner = null_ptr<ThreadGC *>();
            pNode->m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
            assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
        }

        void GarbageCollector::Scan( ThreadGC * pThreadGC )
        {
            CDS_HRC_STATISTIC( ++m_Stat.m_ScanCalls );

            typedef std::vector< ContainerNode * > hazard_ptr_list;

            details::thread_descriptor * pRec = pThreadGC->m_pDesc;
            assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );

            // Stage 1: mark all pRec->m_arrRetired items as "traced"
            {
                details::retired_vector::const_iterator itEnd = pRec->m_arrRetired.end();

                for ( details::retired_vector::const_iterator it = pRec->m_arrRetired.begin(); it != itEnd; ++it ) {
                    ContainerNode * pNode = it->m_pNode.load( CDS_ATOMIC::memory_order_acquire );
                    if ( pNode ) {
                        if ( pNode->m_RC.value() == 0 ) {
                            pNode->m_bTrace.store( true, CDS_ATOMIC::memory_order_release );
                            // Re-check: the node may have been revived concurrently
                            if ( pNode->m_RC.value() != 0 )
                                pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
                        }
                    }
                }
            }

            // Array of hazard pointers for all threads
            hazard_ptr_list plist;
            plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
            assert( plist.size() == 0 );

            // Stage 2: traverse the HP list and collect all non-null hazard pointers into plist
            {
                thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );

                while ( pNode ) {
                    for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
                        ContainerNode * hptr = pNode->m_hzp[i];
                        if ( hptr )
                            plist.push_back( hptr );
                    }
                    pNode = pNode->m_pNext;
                }
            }

            // Sort plist to allow binary search
            std::sort( plist.begin(), plist.end() );

            // Stage 3: delete every retired node whose reference count is zero
            // and that is not declared hazardous by any other thread
            {
                details::retired_vector& arr = pRec->m_arrRetired;

                hazard_ptr_list::iterator itHPBegin = plist.begin();
                hazard_ptr_list::iterator itHPEnd = plist.end();

                details::retired_vector::iterator itEnd = arr.end();
                details::retired_vector::iterator it = arr.begin();

                for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
                    details::retired_node& node = *it;
                    ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
                    if ( !pNode )
                        continue;

                    if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(CDS_ATOMIC::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) {
                        // pNode may be destroyed safely

                        node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
                        if ( node.m_nClaim.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) {
                            pNode->terminate( pThreadGC, false );
                            pNode->clean( CDS_ATOMIC::memory_order_relaxed );
                            node.m_funcFree( pNode );

                            arr.pop( nRetired );
                            CDS_HRC_STATISTIC( ++m_Stat.m_DeletedNode );
                            continue;
                        }

                        // The node is claimed by a concurrent CleanUpAll; terminate it concurrently
                        pNode->terminate( pThreadGC, true );
                        CDS_HRC_STATISTIC( ++m_Stat.m_ScanClaimGuarded );
                    }
                    else {
                        CDS_HRC_STATISTIC( ++m_Stat.m_ScanGuarded );
                    }
                }
            }
        }
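
        /*
            Illustrative sketch (not part of the library): the core
            reclamation test of Scan() in isolation. A retired pointer may be
            reclaimed only if it is absent from the sorted array of all
            threads' hazard pointers; std::binary_search makes each lookup
            logarithmic. Names are illustrative.
        */
#if 0
        static bool example_can_reclaim( std::vector<void *>& arrHazards, void * pRetired )
        {
            std::sort( arrHazards.begin(), arrHazards.end() );
            return !std::binary_search( arrHazards.begin(), arrHazards.end(), pRetired );
        }
#endif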

        void GarbageCollector::HelpScan( ThreadGC * pThis )
        {
            if ( pThis->m_pDesc->m_arrRetired.isFull() )
                return;

            CDS_HRC_STATISTIC( ++m_Stat.m_HelpScanCalls );

            const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();

            for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext )
            {
                // If the thread descriptor is free, take ownership of it
                cds::OS::ThreadId expectedThreadId = nullThreadId;
                if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) )
                {
                    continue;
                }

                // We own the thread descriptor now
                assert( pRec->m_pOwner == null_ptr<ThreadGC *>() );

                if ( !pRec->m_bFree ) {
                    // Move all not-yet-deleted pointers to pThis (which is private to the current thread)

                    details::retired_vector& src = pRec->m_arrRetired;
                    details::retired_vector& dest = pThis->m_pDesc->m_arrRetired;
                    assert( !dest.isFull());

                    details::retired_vector::iterator itEnd = src.end();
                    details::retired_vector::iterator it = src.begin();

                    for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
                        if ( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<ContainerNode *>() )
                            continue;

                        dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree );
                        src.pop( nRetired );

                        // If dest became full, try to shrink it before continuing
                        while ( dest.isFull() ) {
                            pThis->cleanUpLocal();
                            if ( dest.isFull() )
                                Scan( pThis );
                            if ( dest.isFull() )
                                CleanUpAll( pThis );
                            else
                                break;
                        }
                    }
                    pRec->m_bFree = true;
                }
                pRec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
            }
        }
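
        /*
            Illustrative sketch (not part of the library): how HelpScan takes
            ownership of a free record. The owner-id slot acts as a lock: a
            single CAS from nullThreadId to the current thread id either
            acquires the record or tells the caller to skip it.
        */
#if 0
        static bool example_try_own( CDS_ATOMIC::atomic<cds::OS::ThreadId>& idOwner )
        {
            cds::OS::ThreadId expected = cds::OS::nullThreadId();
            return idOwner.compare_exchange_strong( expected, cds::OS::getCurrentThreadId(),
                CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
        }
#endif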

        void GarbageCollector::CleanUpAll( ThreadGC * pThis )
        {
            CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls );

            thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
            while ( pThread ) {
                for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) {
                    details::retired_node& rRetiredNode = pThread->m_arrRetired[i];
                    ContainerNode * pNode = rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
                    if ( pNode && !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) {
                        rRetiredNode.m_nClaim.fetch_add( 1, CDS_ATOMIC::memory_order_release );
                        if ( !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire)
                            && pNode == rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire) )
                        {
                            pNode->cleanUp( pThis );
                        }
                        rRetiredNode.m_nClaim.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
                    }
                }
                pThread = pThread->m_pNext;
            }
        }
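
        /*
            Illustrative sketch (not part of the library): the claim protocol
            that lets CleanUpAll() and Scan() cooperate, shown with a
            hypothetical record type. A helper increments m_nClaim before
            touching the node and then re-validates it; Scan() defers physical
            deletion while m_nClaim != 0 (see the m_nClaim check in Scan above).
        */
#if 0
        struct example_retired {
            CDS_ATOMIC::atomic<void *>  m_pNode;
            CDS_ATOMIC::atomic<bool>    m_bDone;
            CDS_ATOMIC::atomic<long>    m_nClaim;
        };
        static void example_help_clean( example_retired& r )
        {
            void * p = r.m_pNode.load( CDS_ATOMIC::memory_order_acquire );
            if ( p && !r.m_bDone.load( CDS_ATOMIC::memory_order_acquire )) {
                r.m_nClaim.fetch_add( 1, CDS_ATOMIC::memory_order_release );
                // Re-validate: the node may have been freed or replaced meanwhile
                if ( !r.m_bDone.load( CDS_ATOMIC::memory_order_acquire )
                    && p == r.m_pNode.load( CDS_ATOMIC::memory_order_acquire ))
                {
                    // ... help clean up *p here ...
                }
                r.m_nClaim.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
            }
        }
#endif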

        GarbageCollector::internal_state& GarbageCollector::getInternalState( GarbageCollector::internal_state& stat) const
        {
            // Constant configuration parameters
            stat.nHPCount               = m_nHazardPointerCount;
            stat.nMaxThreadCount        = m_nMaxThreadCount;
            stat.nMaxRetiredPtrCount    = m_nMaxRetiredPtrCount;
            stat.nHRCRecSize            = sizeof( thread_list_node )
                                            + sizeof( details::retired_node) * m_nMaxRetiredPtrCount;
            stat.nHRCRecAllocated         =
                stat.nHRCRecUsed              =
                stat.nTotalRetiredPtrCount    =
                stat.nRetiredPtrInFreeHRCRecs = 0;

            // Walk through the HRC records
            for ( thread_list_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNext ) {
                ++stat.nHRCRecAllocated;
                size_t nRetiredNodeCount = hprec->m_arrRetired.retiredNodeCount();
                if ( hprec->m_bFree ) {
                    stat.nRetiredPtrInFreeHRCRecs += nRetiredNodeCount;
                }
                else {
                    ++stat.nHRCRecUsed;
                }
                stat.nTotalRetiredPtrCount += nRetiredNodeCount;
            }

            // Event counters
            stat.evcAllocHRCRec         = m_Stat.m_AllocHRCThreadDesc;
            stat.evcRetireHRCRec        = m_Stat.m_RetireHRCThreadDesc;
            stat.evcAllocNewHRCRec      = m_Stat.m_AllocNewHRCThreadDesc;
            stat.evcDeleteHRCRec        = m_Stat.m_DeleteHRCThreadDesc;
            stat.evcScanCall            = m_Stat.m_ScanCalls;
            stat.evcHelpScanCalls       = m_Stat.m_HelpScanCalls;
            stat.evcCleanUpAllCalls     = m_Stat.m_CleanUpAllCalls;
            stat.evcDeletedNode         = m_Stat.m_DeletedNode;
            stat.evcScanGuarded         = m_Stat.m_ScanGuarded;
            stat.evcScanClaimGuarded    = m_Stat.m_ScanClaimGuarded;

#       ifdef CDS_DEBUG
            stat.evcNodeConstruct       = m_Stat.m_NodeConstructed;
            stat.evcNodeDestruct        = m_Stat.m_NodeDestructed;
#       endif

            return stat;
        }
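
        /*
            Illustrative sketch (not part of the library): reading the
            statistics filled in above. The gc reference stands for the
            GarbageCollector singleton, however the application obtains it;
            the accessor is deliberately left out here.
        */
#if 0
        static size_t example_used_records( GarbageCollector& gc )
        {
            GarbageCollector::internal_state stat;
            gc.getInternalState( stat );
            // Other fields: stat.nHPCount, stat.evcScanCall, stat.evcDeletedNode, ...
            return stat.nHRCRecUsed;
        }
#endif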

        void ContainerNode::cleanUp( ThreadGC * /*pGC*/ )
        {
            CDS_PURE_VIRTUAL_FUNCTION_CALLED_("cds::gc::hrc::ContainerNode::cleanUp");
        }

        void ContainerNode::terminate( ThreadGC * /*pGC*/, bool /*bConcurrent*/ )
        {
            CDS_PURE_VIRTUAL_FUNCTION_CALLED_("cds::gc::hrc::ContainerNode::terminate");
        }

    }    // namespace hrc
}} // namespace cds::gc