//$$CDS-header$$

#ifndef __CDS_GC_DHP_DHP_H
#define __CDS_GC_DHP_DHP_H

#include <mutex>        // unique_lock
#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class, you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for an explanation.

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This serialization should not introduce a significant performance bottleneck because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            The guards allocated to a thread are pushed back to the global list only when the thread terminates.
    */
    namespace dhp {
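
        /*
            Typical initialization from user code - a minimal sketch, assuming the public
            wrapper cds::gc::DHP and the headers <cds/init.h> and <cds/gc/dhp.h> that the
            documentation above refers to:

                #include <cds/init.h>       // cds::Initialize, cds::Terminate
                #include <cds/gc/dhp.h>     // cds::gc::DHP

                int main()
                {
                    cds::Initialize();
                    {
                        cds::gc::DHP dhpGC;     // constructs the DHP singleton
                        // attach threads and use DHP-based containers here
                    }                           // dtor destroys the DHP singleton
                    cds::Terminate();
                }
        */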

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of the Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                typedef void *  guarded_ptr  ;   ///< type of value guarded

                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded

#if 0
                typedef cds::SpinLock   handoff_spin ;  ///< type of spin-lock for accessing the \p pHandOff field
                handoff_spin    spinHandOff     ;   ///< access to \p pHandOff field
                handoff_ptr     pHandOff        ;   ///< trapped pointer
#endif

                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of the global list of allocated guards
                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in the global or thread-local free list

                guard_data *             pThreadNext ;   ///< next item of the thread's local list of guards

                //@cond
                guard_data()
                    : pPost( nullptr )
#if 0
                    , pHandOff( nullptr )
#endif
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init()
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }
                //@endcond

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the free guard list is lock-based.
                    Lock-free manipulation of the guard free list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap using the guard allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should allocate in blocks

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the list.
                    // m_GuardList is an accumulate-only list that does not support concurrent deletion,
                    // so the ABA problem is impossible for it.
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator()
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free list
                */
                void free( guard_data * pGuard )
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by the guard's \p pThreadNext and \p pNextFree fields.

                    Supporting method for cds::gc::dhp::ThreadGC
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The guard list allocated is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guard's \p pThreadNext field.

                    Supporting method for cds::gc::dhp::ThreadGC
                */
                void freeList( guard_data * pList )
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the allocated guard list
                guard_data * begin()
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };
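
            /*
                A minimal sketch of how this allocator is driven by ThreadGC (defined below);
                the count 8 is an arbitrary illustration:

                    guard_allocator<> pool;

                    // thread start: grab a private block of guards in one shot
                    guard_data * pLocal = pool.allocList( 8 );

                    // private free list exhausted: take one more guard from the pool
                    guard_data * pExtra = pool.alloc();
                    pExtra->pThreadNext = pLocal;   // keep the thread list linked

                    // thread exit: return the whole private list (linked by pThreadNext)
                    pool.freeList( pExtra );
            */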

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p liberate procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                //@cond
                retired_ptr_buffer()
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer()
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }
                //@endcond

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node )
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref dhp_gc_privatve "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatve
                */
                privatize_result privatize()
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the liberate function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };
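
            /*
                A sketch of the retire/liberate interplay around this buffer; node and
                nThreshold are placeholders for what GarbageCollector (below) actually passes:

                    retired_ptr_buffer buf;

                    // retire side: push a node and check the threshold
                    if ( buf.push( node ) >= nThreshold )
                        ;   // call liberate()

                    // liberate side: take the whole accumulated list at once
                    retired_ptr_buffer::privatize_result res = buf.privatize();
                    for ( retired_ptr_node * p = res.first; p; ) {
                        retired_ptr_node * pNext = p->m_pNext;  // save next before freeing
                        // free p->m_ptr if no guard traps it, otherwise re-push p
                        p = pNext;
                    }
            */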

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked into a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                //@cond
                void allocNewBlock()
                {
                    // allocate a new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }
                unsigned int next_epoch() const
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }
                //@endcond

            public:
                //@cond
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments the current epoch
                void inc_epoch()
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                //@endcond

                /// Allocates a new retired pointer node
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer node
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (pHead, pTail) of retired pointers into the pool (frees retired pointers)
                /**
                    The list is linked via the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
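
            /*
                A sketch of the pool's allocate/free cycle; pRetired, pHead and pTail are
                placeholders for values produced by retire/liberate:

                    retired_ptr_pool<> pool;

                    // retire side: wrap a retired pointer into a pool node
                    retired_ptr_node& node = pool.alloc( pRetired );

                    // liberate side: return a privatized sublist [pHead, pTail] to the
                    // previous epoch (next_epoch()), then open a new epoch so the nodes
                    // just freed cannot be reused until the epoch counter wraps around
                    pool.free_range( pHead, pTail );
                    pool.inc_epoch();
            */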

            /// Uninitialized guard
            class guard
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data
            public:
                /// Initializes an empty guard.
                guard()
                    : m_pGuard( nullptr )
                {}

                /// The object is not copy-constructible
                guard( guard const& ) = delete;

                /// Object destructor, does nothing
                ~guard()
                {}

                /// Guards pointer \p p
                void set( void * p )
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, atomics::memory_order_release );
                    //CDS_COMPILER_RW_BARRIER;
                }

                /// Clears the guard
                void clear()
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =( T * p )
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                //@cond
                std::nullptr_t operator=(std::nullptr_t)
                {
                    clear();
                    return nullptr;
                }
                //@endcond

            public: // for ThreadGC.
                /*
                    GCC cannot compile the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    We would not like to declare the m_pGuard member as public.
                    Therefore, we have to add the public set_guard/get_guard functions
                */
                /// Sets guard data
                void set_guard( details::guard_data * pGuard )
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Gets current guard data
                details::guard_data * get_guard()
                {
                    return m_pGuard;
                }
                /// Gets current guard data [const version]
                details::guard_data * get_guard() const
                {
                    return m_pGuard;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
        */
        class Guard: public details::guard
        {
            //@cond
            typedef details::guard    base_class;
            friend class ThreadGC;
            //@endcond

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc GC. \p gc must be the ThreadGC object of the current thread
            Guard(ThreadGC& gc);

            /// Returns the guard allocated back to the pool of free guards
            ~Guard();    // inline after GarbageCollector

            /// Returns the DHP GC object
            ThreadGC& getGC()
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =( T * p )
            {
                return base_class::operator =<T>( p );
            }

            //@cond
            std::nullptr_t operator=(std::nullptr_t)
            {
                return base_class::operator =(nullptr);
            }
            //@endcond
        };
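
        /*
            A minimal usage sketch; Foo and atomicPtr are illustrative only, and in real code
            the ThreadGC object comes from the cds::threading infrastructure:

                struct Foo { int nValue; };
                atomics::atomic<Foo *> atomicPtr;

                ThreadGC threadGC;              // per-thread object, see class below
                threadGC.init();
                {
                    Guard guard( threadGC );    // ctor takes a guard from the pool

                    // standard hazard-pointer protocol: publish, then re-check the source
                    Foo * p;
                    do {
                        p = atomicPtr.load( atomics::memory_order_acquire );
                        guard = p;
                    } while ( p != atomicPtr.load( atomics::memory_order_acquire ));

                    // p is now protected from reclamation while the guard holds it
                }                               // dtor returns the guard to the pool
                threadGC.fini();
        */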

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebinds the array for another size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc, which must be the ThreadGC object of the current thread
            GuardArray( ThreadGC& gc )    ;    // inline below

            /// The object is not default-constructible
            GuardArray() = delete;

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// Returns the guards allocated back to the pool
            ~GuardArray()    ;    // inline below

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns the DHP ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns a reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex )
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns a reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll()
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };
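
        /*
            A sketch of guarding two pointers at once, e.g. a node and its successor during
            list traversal; pNode and pNext are placeholders:

                ThreadGC threadGC;
                threadGC.init();
                {
                    GuardArray<2> guards( threadGC );   // ctor takes 2 guards from the pool
                    guards.set( 0, pNode );             // protect the current node
                    guards.set( 1, pNext );             // protect its successor
                    // both pointers are safe to dereference here
                    guards.clearAll();
                }                                       // dtor returns the guards to the pool
                threadGC.fini();
        */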

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            //@cond
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };
            //@endcond

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
            //atomics::atomic<size_t>      m_nInLiberate       ;   ///< number of parallel \p liberate function calls

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call liberate
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                \li \p nLiberateThreshold - the liberate threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "liberate" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p liberate is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If necessary, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the GC's common pool.
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns a reference to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p DHPManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw DHPManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for future reuse
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    liberate();
            }

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void liberate();

            //@}

        private:
            //@cond
#if 0
            void liberate( details::liberate_set& set );
#endif
            //@endcond

        public:
            /// Gets internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics is enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            //@cond none
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
            //@endcond
        };
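
        /*
            A sketch of retiring a pointer through the singleton; Foo and fooDisposer are
            placeholders for a user type and its deleter:

                struct Foo { int nValue; };
                static void fooDisposer( Foo * p ) { delete p; }

                // usually done by the cds::gc::DHP wrapper object
                GarbageCollector::Construct();

                Foo * pFoo = new Foo;
                // ... unlink pFoo from the data structure, then:
                GarbageCollector::instance().retirePtr( pFoo, fooDisposer );
                // fooDisposer( pFoo ) runs inside liberate() once no guard traps pFoo

                GarbageCollector::Destruct();
        */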

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses DHP GC. Before terminating a thread linked to DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                if ( m_pFree ) {
                    g.m_pGuard = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                }
                else {
                    g.m_pGuard = m_gc.allocGuard();
                    g.m_pGuard->pThreadNext = m_pList;
                    m_pList = g.m_pGuard;
                }
            }

            /// Frees guard \p g
            void freeGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = g.m_pGuard;
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            //@cond
            void scan()
            {
                m_gc.liberate();
            }
            //@endcond

        };
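
        /*
            Per-thread lifecycle, as a sketch; in real code cds::threading::Manager::attachThread() /
            detachThread() performs these steps for you, as noted above:

                void threadFunc()
                {
                    ThreadGC threadGC;      // binds to the GarbageCollector singleton
                    threadGC.init();        // borrow the initial guard block from the global pool

                    // use Guard / GuardArray and retirePtr() here

                    threadGC.fini();        // return all thread-local guards to the global pool
                }
        */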

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace dhp
}}  // namespace cds::gc

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif


#endif // #ifndef __CDS_GC_DHP_DHP_H