//$$CDS-header$$

#ifndef __CDS_GC_DETAILS_DHP_H
#define __CDS_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for details.

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This should not introduce a significant performance bottleneck because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            The guards allocated for a thread are pushed back to the global list only when the thread terminates.
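
        \par Usage sketch
            An illustrative (non-normative) initialization sequence using the public wrapper.
            The entry points shown (cds::Initialize, cds::Terminate, cds::gc::DHP,
            cds::threading::Manager) are the usual libcds ones; see their own documentation
            for details.
            \code
            #include <cds/init.h>
            #include <cds/gc/dhp.h>

            int main()
            {
                cds::Initialize();   // initialize libcds internals
                {
                    // Construct the DHP GC singleton; DHP-based containers
                    // may be used from this point on
                    cds::gc::DHP dhpGC;

                    // Each thread that uses the DHP GC must be attached,
                    // including the main thread
                    cds::threading::Manager::attachThread();

                    // ... use cds::gc::DHP-based containers ...

                    cds::threading::Manager::detachThread();
                }
                cds::Terminate();    // finalize libcds
                return 0;
            }
            \endcode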
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                typedef void *  guarded_ptr  ;   ///< type of value guarded

                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded

                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list

                guard_data *             pThreadNext ;   ///< next item of thread's local list of guards

                //@cond
                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }
                //@endcond

                /// Checks if the guard is free, i.e. it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap. The function uses the aligned allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should allocate in blocks

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the list.
                    // m_GuardList is an append-only list that does not support concurrent deletion,
                    // so the ABA problem cannot arise for it
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }
                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The returned list is linked through the guards' \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked through the guards' \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of all allocated guards
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                //@cond
                CDS_CONSTEXPR retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }
                //@endcond

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref dhp_gc_privatve "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the buffer
                /**@anchor dhp_gc_privatve
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is used only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns the current (approximate) size of the buffer
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };
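
            /*
                How the buffer is meant to be used (a sketch only; the real consumer is
                GarbageCollector::scan(), defined elsewhere). Producers push retired
                nodes concurrently; one thread privatizes the whole list when the
                threshold is reached. nLiberateThreshold and the node processing shown
                here are illustrative assumptions:

                    retired_ptr_buffer buf;
                    size_t nSize = buf.push( node );    // called by any thread on retirePtr()
                    if ( nSize >= nLiberateThreshold ) {
                        // the privatizing thread owns the list exclusively
                        retired_ptr_buffer::privatize_result range = buf.privatize();
                        for ( retired_ptr_node * p = range.first; p; ) {
                            retired_ptr_node * pNext = p->m_pNext;
                            // ... free p->m_ptr if no guard traps it, otherwise re-push ...
                            p = pNext;
                        }
                    }
            */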

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked into a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To avoid the ABA problem we use an epoch-based approach
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                //@cond
                void allocNewBlock()
                {
                    // allocate a new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is updated by compare_exchange_weak on failure
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the global free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is updated by compare_exchange_weak on failure
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }
                //@endcond

            public:
                //@cond
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                //@endcond

                /// Allocates a new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is updated by compare_exchange_weak on failure
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (\p pHead, \p pTail) of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked via the \p m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
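
            /*
                Note on the epoch scheme (informal reading of the code above):
                alloc() pops items from the slot indexed by current_epoch(),
                while free_range() pushes freed items onto the slot indexed by
                next_epoch(), i.e. the previous epoch slot. An item freed in
                epoch e therefore lands in slot (e - 1) mod c_nEpochCount and
                becomes allocatable again only when the epoch counter, advanced
                by inc_epoch() calls from scan(), wraps around to that slot.
                This delay gives concurrent alloc() calls time to leave the old
                free list and defuses the ABA hazard on the free-list CAS.
            */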

            /// Uninitialized guard
            class guard
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data
            public:
                /// Initialize empty guard.
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// The object is not copy-constructible
                guard( guard const& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Guards pointer \p p
                void set( void * p ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, atomics::memory_order_release );
                    //CDS_COMPILER_RW_BARRIER;
                }

                /// Clears the guard
                void clear() CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =(T * p) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                //@cond
                std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }
                //@endcond

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected",
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    We would not like to make the m_pGuard member public.
                    Therefore, we have to add the set_guard/get_guard public functions
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
        */
        class Guard: public details::guard
        {
            //@cond
            typedef details::guard    base_class;
            friend class ThreadGC;
            //@endcond

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc GC. \p gc must be the ThreadGC object of the current thread
            Guard( ThreadGC& gc ) CDS_NOEXCEPT;

            /// Returns the allocated guard back to the pool of free guards
            ~Guard() CDS_NOEXCEPT;    // inline after GarbageCollector

            /// Returns the DHP GC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            //@cond
            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
            //@endcond
        };
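
        /*
            Typical protection pattern built on top of Guard (an illustrative
            sketch; the real loop lives in the public cds::gc::DHP wrapper).
            atomicPtr and T are assumptions made for the example:

                ThreadGC& gc = ...;  // ThreadGC object of the current thread
                Guard guard( gc );
                T * p;
                do {
                    p = atomicPtr.load( atomics::memory_order_acquire );
                    guard = p;      // publish the hazard pointer (guard_data::pPost)
                } while ( p != atomicPtr.load( atomics::memory_order_acquire ));
                // From here p cannot be reclaimed by scan() until the guard
                // is cleared or destroyed
        */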

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for another size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc, which must be the ThreadGC object of the current thread
            GuardArray( ThreadGC& gc ) CDS_NOEXCEPT;    // inline below

            /// The object is not default-constructible
            GuardArray() = delete;

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// Returns the allocated guards back to the pool
            ~GuardArray() CDS_NOEXCEPT;    // inline below

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns the DHP ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns a reference to the guard at index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns a reference to the guard at index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard at index \p nIndex (0 <= \p nIndex < \p Count) to \p p
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard at index \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };
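
        /*
            Sketch: guarding two adjacent nodes during a lock-free list
            traversal (pPred and pCur are hypothetical node pointers used
            only for illustration):

                GuardArray<2> guards( gc );
                guards.set( 0, pPred );
                guards.set( 1, pCur );
                // ... neither node can be reclaimed while it stays guarded ...
                guards.clearAll();
        */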

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            //@cond
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };
            //@endcond

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                \li \p nLiberateThreshold - \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.

            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns a pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p DHPManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw DHPManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the buffer of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the buffer of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }
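
            /*
                Sketch of retiring a node (Foo and dispose_foo are hypothetical,
                shown only for illustration):

                    static void dispose_foo( Foo * p ) { delete p; }
                    ...
                    GarbageCollector::instance().retirePtr( pFoo, dispose_foo );

                When the retired buffer reaches m_nLiberateThreshold, the thread
                that pushed the last node runs scan() itself, so reclamation cost
                is amortized across retirePtr() calls.
            */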

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void scan();
            //@}

        public:
            /// Gets internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            //@cond none
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
            //@endcond
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                if ( m_pFree ) {
                    g.m_pGuard = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                }
                else {
                    g.m_pGuard = m_gc.allocGuard();
                    g.m_pGuard->pThreadNext = m_pList;
                    m_pList = g.m_pGuard;
                }
            }

            /// Frees guard \p g
            void freeGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = g.m_pGuard;
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            //@cond
            void scan()
            {
                m_gc.scan();
            }
            //@endcond

        };
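
        /*
            Thread lifecycle sketch (normally driven by
            cds::threading::Manager::attachThread()/detachThread(), not called
            directly):

                ThreadGC threadGC;  // binds to GarbageCollector::instance()
                threadGC.init();    // allocate the initial guard list for this thread
                // ... the thread uses Guard, GuardArray, retirePtr(), ...
                threadGC.fini();    // return all guards to the global pool
        */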

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace dhp
}}  // namespace cds::gc

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef __CDS_GC_DETAILS_DHP_H