//$$CDS-header$$

#ifndef __CDS_GC_DETAILS_DHP_H
#define __CDS_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/algo/atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal implementation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for explanation.
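
        A minimal initialization sketch (cds::Initialize()/cds::Terminate() come from
        <cds/init.h>; see the cds::gc::DHP documentation for the authoritative usage):
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();      // initialize library infrastructure
            {
                cds::gc::DHP dhpGC; // construct the DHP singleton
                cds::threading::Manager::attachThread();  // attach the main thread to the GC

                // ... use cds::gc::DHP-based containers here ...
            }
            cds::Terminate();
            return 0;
        }
        \endcode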

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This solution should not introduce a significant performance bottleneck, because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr       ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list

                guard_data * pThreadNext; ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };
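
            // Typical guarding protocol (a sketch of how higher-level code uses guard_data;
            // the loop re-checks the source so a pointer retired between the load and the
            // publication cannot slip through unprotected):
            //
            //      void * p;
            //      do {
            //          p = pShared.load( atomics::memory_order_acquire );
            //          pGuard->pPost.store( p, atomics::memory_order_release );
            //      } while ( p != pShared.load( atomics::memory_order_acquire ));
            //
            // scan() treats every non-null pPost as a live pointer and does not free it.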

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: work with m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap. The function uses the guard allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should allocate in blocks

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the global list.
                    // m_GuardList is an accumulating list that does not support concurrent deletion,
                    // so the ABA problem is impossible for it
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by the guard's \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The guard list allocated is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guard's \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };
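
            // Note: begin() exposes the head of the global guard list so that the
            // collector's scan() pass can walk all allocated guards and gather the set
            // of currently guarded pointers before freeing retired nodes.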

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }
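
                // How scan() consumes the buffer (a sketch): the whole list is taken
                // in one atomic exchange and then walked privately:
                //
                //      privatize_result r = buffer.privatize();
                //      for ( retired_ptr_node * p = r.first; p; p = p->m_pNext ) {
                //          // free p->m_ptr if no guard protects it
                //      }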

                /// Returns current size of buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
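                //
                // How the epochs work (an explanatory note): alloc() pops items from the
                // free list of the current epoch, while free_range() pushes freed items
                // onto the list of a different epoch; inc_epoch() rotates the current
                // epoch. An item can therefore reappear on the allocation path only after
                // the epoch counter has advanced, which prevents the classic CAS ABA
                // scenario (pop A, pop B, re-push A) on the per-epoch free lists.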
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link new block to the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link the block's items to the global free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }

            public:
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (\p pHead, \p pTail) of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked by the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
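
            // Typical pool flow during one GC pass (a sketch; the exact call sequence
            // lives in the GarbageCollector implementation):
            //
            //      retired_ptr_node& node = pool.alloc( retired_ptr( p, disposer ));  // on retirePtr()
            //      ...
            //      pool.free_range( pFirstFreed, pLastFreed ); // scan() recycles reclaimed nodes
            //      pool.inc_epoch();                           // rotate the epoch between passes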

            /// Uninitialized guard
            class guard
            {
                friend class dhp::ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data

            public:
                /// Initialize empty guard.
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// Copy-ctor is disabled
                guard( guard const& ) = delete;

                /// Move-ctor is disabled
                guard( guard&& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Get current guarded pointer
                void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    return m_pGuard->pPost.load( order );
                }

                /// Guards pointer \p p
                void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, order );
                }

                /// Clears the guard
                void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, order );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =(T * p) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    Therefore, we have to add public set_guard/get_guard functions
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }

                details::guard_data * release_guard() CDS_NOEXCEPT
                {
                    details::guard_data * p = m_pGuard;
                    m_pGuard = nullptr;
                    return p;
                }

                bool is_initialized() const
                {
                    return m_pGuard != nullptr;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;
        public:
            /// Allocates a guard from the GC; the current thread must be attached to a ThreadGC object
            Guard(); // inline in dhp_impl.h

            /// Returns the guard allocated back to the pool of free guards
            ~Guard();    // inline in dhp_impl.h

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
        };
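
        // Usage sketch (assumes the current thread is attached to the DHP GC):
        //
        //      dhp::Guard g;
        //      Foo * p = g = pShared.load( atomics::memory_order_acquire );
        //      // ... p cannot be freed by scan() while g holds it ...
        //      g = nullptr;    // release protection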

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards; the current thread must be attached to a ThreadGC object
            GuardArray();    // inline in dhp_impl.h

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// The object is not move-constructible
            GuardArray( GuardArray&& ) = delete;

            /// Returns the guards allocated back to the pool
            ~GuardArray();    // inline in dhp_impl.h

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };
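
        // Usage sketch: guard two pointers at once, e.g. a node and its successor
        // (the names are illustrative; assumes the thread is attached to the DHP GC):
        //
        //      dhp::GuardArray<2> ga;
        //      ga.set( 0, pNode );
        //      ga.set( 1, pNode->pNext );
        //      // both pointers stay protected until cleared or until ga is destroyed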

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                \li \p nLiberateThreshold - \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If necessary, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the GC's common pool.
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();
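
            // Typical pairing (a sketch; the cds::gc::DHP ctor/dtor normally performs
            // these calls, so application code rarely invokes them directly):
            //
            //      cds::gc::dhp::GarbageCollector::Construct( 1024, 8 );
            //      // ... threads work with DHP-based containers ...
            //      cds::gc::dhp::GarbageCollector::Destruct();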

            /// Returns pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p DHPManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw DHPManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }
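
            // Example (a sketch): retire a node with a type-specific disposer; the node
            // is physically freed by a later scan() once no guard protects it:
            //
            //      static void dispose_foo( Foo * p ) { delete p; }
            //      ...
            //      GarbageCollector::instance().retirePtr( pFoo, dispose_foo );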

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void scan();
            //@}

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks whether internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
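
            A typical thread lifecycle (a sketch; normally cds::threading::Manager
            performs these steps when a thread attaches/detaches):
            \code
            cds::gc::dhp::ThreadGC threadGC;
            threadGC.init();   // attach: allocate the initial local guard pool
            // ... the thread works with DHP-based containers ...
            threadGC.fini();   // detach: return all guards to the global pool
            \endcode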
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( !g.m_pGuard ) {
                    if ( m_pFree ) {
                        g.m_pGuard = m_pFree;
                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    }
                    else {
                        g.m_pGuard = m_gc.allocGuard();
                        g.m_pGuard->pThreadNext = m_pList;
                        m_pList = g.m_pGuard;
                    }
                }
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( g.m_pGuard ) {
                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g.m_pGuard;
                    g.m_pGuard = nullptr;
                }
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            void scan()
            {
                m_gc.scan();
            }
        };
    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef __CDS_GC_DETAILS_DHP_H