//$$CDS-header$$

#ifndef __CDS_GC_DETAILS_DHP_H
#define __CDS_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation scheme
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP scheme is
        the GC class and its nested classes. Before using any DHP-related class, you must initialize the DHP garbage
        collector by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for explanation.
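
        A minimal setup sketch (how the DHP-based containers are used afterwards is up to the application):
        \code
        #include <cds/init.h>       // cds::Initialize, cds::Terminate
        #include <cds/gc/dhp.h>     // cds::gc::DHP

        int main()
        {
            cds::Initialize();          // initialize libcds infrastructure
            {
                cds::gc::DHP dhpGC;     // construct the DHP garbage collector singleton

                // attach threads and work with DHP-based containers here
            }
            cds::Terminate();           // terminate libcds infrastructure
        }
        \endcode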

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This should not introduce a significant performance bottleneck, because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from
            the global list. Guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr       ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list

                guard_data * pThreadNext; ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, i.e. it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the free guard list is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: handle m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap
                guard_data * allocNew()
                {
                    //TODO: the allocator should allocate guards in blocks

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the global list.
                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
                    // so the ABA problem is impossible for it.
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The returned list is linked through the guards' \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked through the guards' \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
                the retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list
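
                // How the epochs work: free_range() pushes freed items onto the free list of the
                // previous epoch (next_epoch()), alloc() pops items from the free list of the
                // current epoch (current_epoch()), and the GC advances the epoch counter via
                // inc_epoch() (typically once per scan() pass). A freed item thus becomes
                // allocatable again only after the epoch counter has advanced several times,
                // which makes the ABA problem on these lock-free lists practically impossible.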

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link new block to block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link block's items to free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }

            public:
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // Epoch free list is empty
                    // Alloc from global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (pHead, pTail) of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked through the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };

            /// Uninitialized guard
            class guard
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data

            public:
                /// Initializes an empty guard.
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// Copy-ctor is disabled
                guard( guard const& ) = delete;

                /// Move-ctor is disabled
                guard( guard&& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Get current guarded pointer
                void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    return m_pGuard->pPost.load( order );
                }

                /// Guards pointer \p p
                void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, order );
                }

                /// Clears the guard
                void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, order );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =(T * p) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected",
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    Therefore, we have to add public set_guard/get_guard functions.
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }

                details::guard_data * release_guard() CDS_NOEXCEPT
                {
                    details::guard_data * p = m_pGuard;
                    m_pGuard = nullptr;
                    return p;
                }

                bool is_initialized() const
                {
                    return m_pGuard != nullptr;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
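
            Usage sketch (assuming \p gc is the ThreadGC object of the current thread and \p pShared is
            an atomic pointer to a shared node of some type \p foo; both names are hypothetical):
            \code
            dhp::Guard guard( gc );
            foo * p;
            do {
                p = pShared.load( atomics::memory_order_acquire );
                guard = p;      // publish the pointer being protected
            } while ( p != pShared.load( atomics::memory_order_acquire ));
            // while the guard holds p, scan() will not free *p even if it has been retired
            guard = nullptr;    // clear the guard when done
            \endcode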
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc. \p gc must be the ThreadGC object of the current thread
            Guard( ThreadGC& gc );

            /// Returns the guard allocated back to the pool of free guards
            ~Guard();    // inline after GarbageCollector

            /// Returns DHP GC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
        };

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
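
            Usage sketch (assuming \p gc is the ThreadGC object of the current thread;
            \p pFirst and \p pSecond are hypothetical pointers to protect):
            \code
            dhp::GuardArray<2> guards( gc );
            guards.set( 0, pFirst );    // guard the first pointer
            guards.set( 1, pSecond );   // guard the second pointer
            // ... work with the guarded pointers ...
            guards.clearAll();          // release the protected pointers
            \endcode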
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guard
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for another size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc, which must be the ThreadGC object of the current thread
            GuardArray( ThreadGC& gc );    // inline below

            /// The object is not default-constructible
            GuardArray() = delete;

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// Returns the guards allocated back to the pool
            ~GuardArray();    // inline below

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns DHP ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal Statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                it is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                \li \p nLiberateThreshold - the \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p DHPManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw DHPManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard( details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
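
                Usage sketch (\p foo is a hypothetical node type, \p pNode a pointer removed from a container):
                \code
                static void dispose_foo( foo * p ) { delete p; }
                // ...
                dhp::GarbageCollector::instance().retirePtr( pNode, dispose_foo );
                \endcode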
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
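
                In outline (the implementation lives outside this header): \p scan() privatizes the retired
                pointer buffer, places the retired pointers into a \p liberate_set, then walks the global list
                of guards and withholds every pointer that is currently guarded; the remaining pointers are
                freed via their deleters, and the epoch of the retired pointer pool is advanced.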
            */
            void scan();
            //@}

        public:
            /// Gets internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation scheme, each thread must be linked with a ThreadGC object
            that interacts with the global GarbageCollector object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before a thread linked to the DHP GC terminates, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
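
            Usage sketch:
            \code
            // at thread startup
            cds::threading::Manager::attachThread();

            // ... the thread works with DHP-based containers ...

            // before thread exit
            cds::threading::Manager::detachThread();
            \endcode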
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( !g.m_pGuard ) {
                    if ( m_pFree ) {
                        g.m_pGuard = m_pFree;
                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    }
                    else {
                        g.m_pGuard = m_gc.allocGuard();
                        g.m_pGuard->pThreadNext = m_pList;
                        m_pList = g.m_pGuard;
                    }
                }
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( g.m_pGuard ) {
                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g.m_pGuard;
                    g.m_pGuard = nullptr;
                }
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            void scan()
            {
                m_gc.scan();
            }
        };

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef __CDS_GC_DETAILS_DHP_H