Hazard Pointer docfix
[libcds.git] / cds / gc / details / dhp.h
//$$CDS-header$$

#ifndef __CDS_GC_DETAILS_DHP_H
#define __CDS_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for explanation.
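
        For example, a minimal initialization sketch (assuming the usual libcds startup sequence
        with cds::Initialize() / cds::Terminate() and per-thread attachment via cds::threading::Manager):
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();      // initialize libcds internals
            {
                cds::gc::DHP dhpGC; // construct the DHP garbage collector singleton
                cds::threading::Manager::attachThread();    // attach the main() thread to libcds

                // ... use cds::gc::DHP-based containers here ...
            }   // the DHP singleton is destroyed here
            cds::Terminate();       // finalize libcds
        }
        \endcode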

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            It seems that this solution should not introduce a significant performance bottleneck, because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In this case the thread allocates the next block of guards from the global list.
            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                typedef void *  guarded_ptr  ;   ///< type of value guarded

                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded

                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list

                guard_data *             pThreadNext ;   ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap. The function uses the aligned allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should use block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard to the list.
                    // m_GuardList is an accumulating list that does not support concurrent deletion,
                    // so the ABA problem is impossible for it
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by the guards' \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The guard list allocated is private for the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guards' \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the size of the buffer exceeds a threshold, the GC calls the \p scan() procedure to free
                retired nodes.
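
                A hedged sketch of how a consumer might drain the buffer (the real work is done by
                \ref dhp_gc_liberate "scan()" in the GC implementation; this assumes \p retired_ptr::free()
                invokes the stored deleter, and \p buf is a hypothetical buffer instance):
                \code
                retired_ptr_buffer::privatize_result r = buf.privatize();
                for ( retired_ptr_node * p = r.first; p != nullptr; ) {
                    retired_ptr_node * pNext = p->m_pNext;
                    p->m_ptr.free();    // call the deleter for the retired pointer
                    p = pNext;          // the nodes themselves go back to retired_ptr_pool
                }
                \endcode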
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                CDS_CONSTEXPR retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes new node into the buffer. Returns current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns current size of buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked into a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
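                /// (apparently chosen so that a \p block, including its \p pNext pointer, fits in about 1 KB)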
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
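                // (A sketch of the idea, as read from the code below: alloc() pops items from the
                // free list of the current epoch, free_range() pushes freed items onto the free list
                // of the previous epoch, and inc_epoch() advances the epoch after each scan() pass.
                // An item therefore is not handed out again until the epoch counter wraps around to
                // its list, so a concurrent alloc() is unlikely to still hold a stale pointer to it,
                // which is the classic ABA scenario on a free list.)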
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link new block to block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link block's items to free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }

            public:
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // Epoch free list is empty
                    // Alloc from global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (pHead, pTail) of retired pointers into the pool (frees retired pointers)
                /**
                    The list is linked via the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };

            /// Uninitialized guard
            class guard
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data
            public:
                /// Initializes an empty guard.
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// The object is not copy-constructible
                guard( guard const& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Guards pointer \p p
                void set( void * p ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, atomics::memory_order_release );
                    //CDS_COMPILER_RW_BARRIER;
                }

                /// Clears the guard
                void clear() CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =(T * p) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected",
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    We would rather not declare the m_pGuard member as public.
                    Therefore, we have to add the set_guard/get_guard public functions
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            and the dtor returns the guard back to the pool of free guards.
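
            A hedged usage sketch (assuming \p gc is the ThreadGC object of the current thread,
            typically obtained via the cds::threading layer, and \p pNode is a hypothetical node pointer):
            \code
            Guard g( gc );          // allocate a guard from the pool
            g = pNode;              // protect pNode from reclamation
            // ... safely dereference pNode while the guard holds it ...
            g = nullptr;            // optionally clear the guard early
            \endcode               // the dtor returns the guard to the pool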
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc GC. \p gc must be the ThreadGC object of the current thread
            Guard( ThreadGC& gc ) CDS_NOEXCEPT;

            /// Returns the allocated guard back to the pool of free guards
            ~Guard() CDS_NOEXCEPT;    // inline after GarbageCollector

            /// Returns DHP GC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
        };

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            and the dtor returns the guards back to the pool.
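
            A hedged usage sketch (assuming \p gc is the current thread's ThreadGC; two guards
            protecting a hypothetical pair of adjacent nodes during a list traversal):
            \code
            GuardArray<2> ga( gc );
            ga.set( 0, pPrev );     // protect pPrev
            ga.set( 1, pCur );      // protect pCur
            // ... both nodes are now safe to dereference ...
            ga.clearAll();          // release both protections; the guards stay allocated
            \endcode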
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc which must be the ThreadGC object of the current thread
            GuardArray( ThreadGC& gc ) CDS_NOEXCEPT;    // inline below

            /// The object is not default-constructible
            GuardArray() = delete;

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// Returns the allocated guards back to the pool
            ~GuardArray() CDS_NOEXCEPT;    // inline below

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns DHP ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal Statistics enabled

        public:
            /// Initializes DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                \li \p nLiberateThreshold - \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates the thread's local guard pool from the common guard pool.
                    The local guard pool grows automatically from the common pool when needed.
                    When the thread terminates, its guard pool is returned to the common GC pool.

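                \par Example
                A hedged sketch of direct construction/destruction (the public cds::gc::DHP wrapper
                normally performs these calls for you; the threshold and guard-count values here are
                arbitrary illustrations):
                \code
                int main()
                {
                    cds::Initialize();
                    cds::gc::dhp::GarbageCollector::Construct( 256, 16 );

                    // ... attach threads and use cds::gc::DHP-based containers ...

                    cds::gc::dhp::GarbageCollector::Destruct();
                    cds::Terminate();
                }
                \endcode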
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p DHPManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw DHPManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the GC's buffer of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
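
                A hedged usage sketch (\p Foo, \p fooDisposer and \p pFoo are hypothetical names):
                \code
                struct Foo { ... };
                static void fooDisposer( Foo * p ) { delete p; }

                // Defer reclamation of pFoo until no guard protects it
                GarbageCollector::instance().retirePtr( pFoo, fooDisposer );
                \endcode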
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the GC's buffer of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
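
                In outline (a sketch inferred from this header; the implementation lives in the .cpp file):
                the function privatizes the retired-pointer buffer, collects the pointers currently held
                in all allocated guards (see guard_allocator::begin()) into a \p liberate_set, frees every
                privatized retired pointer not found in that set, pushes the still-guarded ones back into the
                buffer, and finally advances the retired-pointer pool epoch via \p inc_epoch().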
            */
            void scan();
            //@}

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics is enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
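
            A hedged sketch of the per-thread lifecycle (normally driven by the cds::threading layer
            rather than called by hand):
            \code
            cds::threading::Manager::attachThread();   // creates the thread's ThreadGC and initializes it
            // ... the thread works with DHP-based containers;
            //     Guard / GuardArray objects draw from this thread's guard pool ...
            cds::threading::Manager::detachThread();   // finalizes the ThreadGC, returning its guards to the global pool
            \endcode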
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                if ( m_pFree ) {
                    g.m_pGuard = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                }
                else {
                    g.m_pGuard = m_gc.allocGuard();
                    g.m_pGuard->pThreadNext = m_pList;
                    m_pList = g.m_pGuard;
                }
            }

            /// Frees guard \p g
            void freeGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = g.m_pGuard;
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            void scan()
            {
                m_gc.scan();
            }
        };

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef __CDS_GC_DETAILS_DHP_H