Fix typos, no functional changes
[libcds.git] / cds / gc / details / dhp.h
1 //$$CDS-header$$
2
3 #ifndef CDSLIB_GC_DETAILS_DHP_H
4 #define CDSLIB_GC_DETAILS_DHP_H
5
6 #include <mutex>        // unique_lock
7 #include <cds/algo/atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/sync/spinlock.h>
12
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 #   pragma warning(push)
15 #   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
16 #endif
17
18 //@cond
19 namespace cds { namespace gc {
20
21     /// Dynamic Hazard Pointer reclamation schema
22     /**
23         The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
24         Use cds::gc::DHP class in your code.
25
26         Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of DHP schema is
27         GC class and its nested classes. Before use any DHP-related class you must initialize DHP garbage collector
28         by contructing cds::gc::DHP object in beginning of your main().
29         See cds::gc::DHP class for explanation.
30
31         \par Implementation issues
32             The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
33             It seems that this solution should not introduce significant performance bottleneck, because each thread has its own set
34             of guards allocated from the global list of free guards and the access to the global list is occurred only when
35             all thread's guard is busy. In this case the thread allocates a next block of guards from the global list.
36             Guards allocated for the thread is push back to the global list only when the thread terminates.
37     */
38     namespace dhp {
39
40         // Forward declarations
41         class Guard;
42         template <size_t Count> class GuardArray;
43         class ThreadGC;
44         class GarbageCollector;
45
46         /// Retired pointer type
47         typedef cds::gc::details::retired_ptr retired_ptr;
48
49         using cds::gc::details::free_retired_ptr_func;
50
51         /// Details of Dynamic Hazard Pointer algorithm
52         namespace details {
53
54             // Forward declaration
55             class liberate_set;
56
57             /// Retired pointer buffer node
58             struct retired_ptr_node {
59                 retired_ptr         m_ptr   ;   ///< retired pointer
60                 retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
61                 retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
62             };
63
64             /// Internal guard representation
65             struct guard_data {
66                 typedef void * guarded_ptr;  ///< type of value guarded
67
68                 atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
69                 atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
70                 atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list
71
72                 guard_data * pThreadNext; ///< next item of thread's local list of guards
73
74                 guard_data() CDS_NOEXCEPT
75                     : pPost( nullptr )
76                     , pGlobalNext( nullptr )
77                     , pNextFree( nullptr )
78                     , pThreadNext( nullptr )
79                 {}
80
81                 void init() CDS_NOEXCEPT
82                 {
83                     pPost.store( nullptr, atomics::memory_order_relaxed );
84                 }
85
86                 /// Checks if the guard is free, that is, it does not contain any pointer guarded
87                 bool isFree() const CDS_NOEXCEPT
88                 {
89                     return pPost.load( atomics::memory_order_acquire ) == nullptr;
90                 }
91             };
92
93             /// Guard allocator
94             template <class Alloc = CDS_DEFAULT_ALLOCATOR>
95             class guard_allocator
96             {
97                 cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator
98
99                 atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
100                 atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
101                 cds::sync::spin                m_freeListLock;  ///< Access to m_FreeGuardList
102
103                 /*
104                     Unfortunately, access to the list of free guard is lock-based.
105                     Lock-free manipulations with guard free-list are ABA-prone.
106                     TODO: working with m_FreeGuardList in lock-free manner.
107                 */
108
109             private:
110                 /// Allocates new guard from the heap. The function uses aligned allocator
111                 guard_data * allocNew()
112                 {
113                     //TODO: the allocator should make block allocation
114
115                     details::guard_data * pGuard = m_GuardAllocator.New();
116
117                     // Link guard to the list
118                     // m_GuardList is an accumulating list and it cannot support concurrent deletion,
119                     // so, ABA problem is impossible for it
120                     details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
121                     do {
122                         pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
123                         // pHead is changed by compare_exchange_weak
124                     } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
125
126                     pGuard->init();
127                     return pGuard;
128                 }
129
130             public:
131                 // Default ctor
132                 guard_allocator() CDS_NOEXCEPT
133                     : m_GuardList( nullptr )
134                     , m_FreeGuardList( nullptr )
135                 {}
136
137                 // Destructor
138                 ~guard_allocator()
139                 {
140                     guard_data * pNext;
141                     for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
142                         pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
143                         m_GuardAllocator.Delete( pData );
144                     }
145                 }
146
147                 /// Allocates a guard from free list or from heap if free list is empty
148                 guard_data * alloc()
149                 {
150                     // Try to pop a guard from free-list
151                     details::guard_data * pGuard;
152
153                     {
154                         std::unique_lock<cds::sync::spin> al( m_freeListLock );
155                         pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
156                         if ( pGuard )
157                             m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
158                     }
159                     if ( !pGuard )
160                         return allocNew();
161
162                     pGuard->init();
163                     return pGuard;
164                 }
165
166                 /// Frees guard \p pGuard
167                 /**
168                     The function places the guard \p pGuard into free-list
169                 */
170                 void free( guard_data * pGuard ) CDS_NOEXCEPT
171                 {
172                     pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
173
174                     std::unique_lock<cds::sync::spin> al( m_freeListLock );
175                     pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
176                     m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
177                 }
178
179                 /// Allocates list of guard
180                 /**
181                     The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
182
183                     cds::gc::dhp::ThreadGC supporting method
184                 */
185                 guard_data * allocList( size_t nCount )
186                 {
187                     assert( nCount != 0 );
188
189                     guard_data * pHead;
190                     guard_data * pLast;
191
192                     pHead =
193                         pLast = alloc();
194
195                     // The guard list allocated is private for the thread,
196                     // so, we can use relaxed memory order
197                     while ( --nCount ) {
198                         guard_data * p = alloc();
199                         pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
200                         pLast = p;
201                     }
202
203                     pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
204
205                     return pHead;
206                 }
207
208                 /// Frees list of guards
209                 /**
210                     The list \p pList is linked by guard's \p pThreadNext field.
211
212                     cds::gc::dhp::ThreadGC supporting method
213                 */
214                 void freeList( guard_data * pList ) CDS_NOEXCEPT
215                 {
216                     assert( pList != nullptr );
217
218                     guard_data * pLast = pList;
219                     while ( pLast->pThreadNext ) {
220                         pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
221                         guard_data * p;
222                         pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
223                         pLast = p;
224                     }
225
226                     std::unique_lock<cds::sync::spin> al( m_freeListLock );
227                     pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
228                     m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
229                 }
230
231                 /// Returns the list's head of guards allocated
232                 guard_data * begin() CDS_NOEXCEPT
233                 {
234                     return m_GuardList.load(atomics::memory_order_acquire);
235                 }
236             };
237
238             /// Retired pointer buffer
239             /**
240                 The buffer of retired nodes ready for liberating.
241                 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
242                 retired nodes.
243             */
244             class retired_ptr_buffer
245             {
246                 atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
247                 atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count
248
249             public:
250                 retired_ptr_buffer() CDS_NOEXCEPT
251                     : m_pHead( nullptr )
252                     , m_nItemCount(0)
253                 {}
254
255                 ~retired_ptr_buffer() CDS_NOEXCEPT
256                 {
257                     assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
258                 }
259
260                 /// Pushes new node into the buffer. Returns current buffer size
261                 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
262                 {
263                     retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
264                     do {
265                         node.m_pNext = pHead;
266                         // pHead is changed by compare_exchange_weak
267                     } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
268
269                     return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
270                 }
271
272                 /// Pushes [pFirst, pLast] list linked by pNext field.
273                 size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
274                 {
275                     assert( pFirst );
276                     assert( pLast );
277
278                     retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
279                     do {
280                         pLast->m_pNext = pHead;
281                         // pHead is changed by compare_exchange_weak
282                     } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
283
284                     return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
285                 }
286
287                 /// Result of \ref dhp_gc_privatve "privatize" function.
288                 /**
289                     The \p privatize function returns retired node list as \p first and the size of that list as \p second.
290                 */
291                 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
292
293                 /// Gets current list of retired pointer and clears the list
294                 /**@anchor dhp_gc_privatve
295                 */
296                 privatize_result privatize() CDS_NOEXCEPT
297                 {
298                     privatize_result res;
299                     res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
300
301                     // Item counter is needed only as a threshold for \p scan() function
302                     // So, we may clear the item counter without synchronization with m_pHead
303                     res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
304                     return res;
305                 }
306
307                 /// Returns current size of buffer (approximate)
308                 size_t size() const CDS_NOEXCEPT
309                 {
310                     return m_nItemCount.load(atomics::memory_order_relaxed);
311                 }
312             };
313
314             /// Pool of retired pointers
315             /**
316                 The class acts as an allocator of retired node.
317                 Retired pointers are linked in the lock-free list.
318             */
319             template <class Alloc = CDS_DEFAULT_ALLOCATOR>
320             class retired_ptr_pool {
321                 /// Pool item
322                 typedef retired_ptr_node    item;
323
324                 /// Count of items in block
325                 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
326
327                 /// Pool block
328                 struct block {
329                     block *     pNext;  ///< next block
330                     item        items[m_nItemPerBlock];   ///< item array
331                 };
332
333                 atomics::atomic<block *> m_pBlockListHead;   ///< head of of allocated block list
334
335                 // To solve ABA problem we use epoch-based approach
336                 static const unsigned int c_nEpochCount = 4;    ///< Max epoch count
337                 atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
338                 atomics::atomic<item *>  m_pEpochFree[c_nEpochCount];   ///< List of free item per epoch
339                 atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list
340
341                 cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator
342
343             private:
344                 void allocNewBlock()
345                 {
346                     // allocate new block
347                     block * pNew = m_BlockAllocator.New();
348
349                     // link items within the block
350                     item * pLastItem = pNew->items + m_nItemPerBlock - 1;
351                     for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
352                         pItem->m_pNextFree = pItem + 1;
353                         CDS_STRICT_DO( pItem->m_pNext = nullptr );
354                     }
355
356                     // links new block to the block list
357                     {
358                         block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
359                         do {
360                             pNew->pNext = pHead;
361                             // pHead is changed by compare_exchange_weak
362                         } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
363                     }
364
365                     // links block's items to the free list
366                     {
367                         item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
368                         do {
369                             pLastItem->m_pNextFree = pHead;
370                             // pHead is changed by compare_exchange_weak
371                         } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
372                     }
373                 }
374
375                 unsigned int current_epoch() const CDS_NOEXCEPT
376                 {
377                     return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
378                 }
379
380                 unsigned int next_epoch() const CDS_NOEXCEPT
381                 {
382                     return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
383                 }
384
385             public:
386                 retired_ptr_pool()
387                     : m_pBlockListHead( nullptr )
388                     , m_nCurEpoch(0)
389                     , m_pGlobalFreeHead( nullptr )
390                 {
391                     for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
392                         m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
393
394                     allocNewBlock();
395                 }
396
397                 ~retired_ptr_pool()
398                 {
399                     block * p;
400                     for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
401                         p = pBlock->pNext;
402                         m_BlockAllocator.Delete( pBlock );
403                     }
404                 }
405
406                 /// Increments current epoch
407                 void inc_epoch() CDS_NOEXCEPT
408                 {
409                     m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
410                 }
411
412                 /// Allocates the new retired pointer
413                 retired_ptr_node&  alloc()
414                 {
415                     unsigned int nEpoch;
416                     item * pItem;
417                     for (;;) {
418                         pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
419                         if ( !pItem )
420                             goto retry;
421                         if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
422                             goto success;
423                     }
424
425                     // Epoch free list is empty
426                     // Alloc from global free list
427                 retry:
428                     pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
429                     do {
430                         if ( !pItem ) {
431                             allocNewBlock();
432                             goto retry;
433                         }
434                         // pItem is changed by compare_exchange_weak
435                     } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
436
437                 success:
438                     CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
439                     return *pItem;
440                 }
441
442                 /// Allocates and initializes new retired pointer
443                 retired_ptr_node& alloc( const retired_ptr& p )
444                 {
445                     retired_ptr_node& node = alloc();
446                     node.m_ptr = p;
447                     return node;
448                 }
449
450                 /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
451                 /**
452                     The list is linked on the m_pNextFree field
453                 */
454                 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
455                 {
456                     assert( pHead != nullptr );
457                     assert( pTail != nullptr );
458
459                     unsigned int nEpoch;
460                     item * pCurHead;
461                     do {
462                         pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
463                         pTail->m_pNextFree = pCurHead;
464                     } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
465                 }
466             };
467
468             /// Uninitialized guard
469             class guard
470             {
471                 friend class dhp::ThreadGC;
472             protected:
473                 details::guard_data * m_pGuard ;    ///< Pointer to guard data
474
475             public:
476                 /// Initialize empty guard.
477                 CDS_CONSTEXPR guard() CDS_NOEXCEPT
478                     : m_pGuard( nullptr )
479                 {}
480
481                 /// Copy-ctor is disabled
482                 guard( guard const& ) = delete;
483
484                 /// Move-ctor is disabled
485                 guard( guard&& ) = delete;
486
487                 /// Object destructor, does nothing
488                 ~guard() CDS_NOEXCEPT
489                 {}
490
491                 /// Get current guarded pointer
492                 void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
493                 {
494                     assert( m_pGuard != nullptr );
495                     return m_pGuard->pPost.load( order );
496                 }
497
498                 /// Guards pointer \p p
499                 void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
500                 {
501                     assert( m_pGuard != nullptr );
502                     m_pGuard->pPost.store( p, order );
503                 }
504
505                 /// Clears the guard
506                 void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
507                 {
508                     assert( m_pGuard != nullptr );
509                     m_pGuard->pPost.store( nullptr, order );
510                 }
511
512                 /// Guards pointer \p p
513                 template <typename T>
514                 T * operator =(T * p) CDS_NOEXCEPT
515                 {
516                     set( reinterpret_cast<void *>( const_cast<T *>(p) ));
517                     return p;
518                 }
519
520                 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
521                 {
522                     clear();
523                     return nullptr;
524                 }
525
526             public: // for ThreadGC.
527                 /*
528                     GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
529                     the compiler produces error: \91cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard\92 is protected
530                     despite the fact that ThreadGC is declared as friend for guard class.
531                     Therefore, we have to add set_guard/get_guard public functions
532                 */
533                 /// Set guard data
534                 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
535                 {
536                     assert( m_pGuard == nullptr );
537                     m_pGuard = pGuard;
538                 }
539
540                 /// Get current guard data
541                 details::guard_data * get_guard() CDS_NOEXCEPT
542                 {
543                     return m_pGuard;
544                 }
545                 /// Get current guard data
546                 details::guard_data * get_guard() const CDS_NOEXCEPT
547                 {
548                     return m_pGuard;
549                 }
550
551                 details::guard_data * release_guard() CDS_NOEXCEPT
552                 {
553                     details::guard_data * p = m_pGuard;
554                     m_pGuard = nullptr;
555                     return p;
556                 }
557
558                 bool is_initialized() const
559                 {
560                     return m_pGuard != nullptr;
561                 }
562             };
563
564         } // namespace details
565
566         /// Guard
567         /**
568             This class represents auto guard: ctor allocates a guard from guard pool,
569             dtor returns the guard back to the pool of free guard.
570         */
571         class Guard: public details::guard
572         {
573             typedef details::guard    base_class;
574             friend class ThreadGC;
575         public:
576             /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
577             Guard(); // inline in dhp_impl.h
578
579             /// Returns guard allocated back to pool of free guards
580             ~Guard();    // inline in dhp_impl.h
581
582             /// Guards pointer \p p
583             template <typename T>
584             T * operator =(T * p) CDS_NOEXCEPT
585             {
586                 return base_class::operator =<T>( p );
587             }
588
589             std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
590             {
591                 return base_class::operator =(nullptr);
592             }
593         };
594
595         /// Array of guards
596         /**
597             This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
598             dtor returns the guards allocated back to the pool.
599         */
600         template <size_t Count>
601         class GuardArray
602         {
603             details::guard      m_arr[Count]    ;    ///< array of guard
604             const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)
605
606         public:
607             /// Rebind array for other size \p OtherCount
608             template <size_t OtherCount>
609             struct rebind {
610                 typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
611             };
612
613         public:
614             /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
615             GuardArray();    // inline in dhp_impl.h
616
617             /// The object is not copy-constructible
618             GuardArray( GuardArray const& ) = delete;
619
620             /// The object is not move-constructible
621             GuardArray( GuardArray&& ) = delete;
622
623             /// Returns guards allocated back to pool
624             ~GuardArray();    // inline in dh_impl.h
625
626             /// Returns the capacity of array
627             CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
628             {
629                 return c_nCapacity;
630             }
631
632             /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
633             details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
634             {
635                 assert( nIndex < capacity() );
636                 return m_arr[nIndex];
637             }
638
639             /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
640             const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
641             {
642                 assert( nIndex < capacity() );
643                 return m_arr[nIndex];
644             }
645
646             /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
647             template <typename T>
648             void set( size_t nIndex, T * p ) CDS_NOEXCEPT
649             {
650                 assert( nIndex < capacity() );
651                 m_arr[nIndex].set( p );
652             }
653
654             /// Clears (sets to \p nullptr) the guard \p nIndex
655             void clear( size_t nIndex ) CDS_NOEXCEPT
656             {
657                 assert( nIndex < capacity() );
658                 m_arr[nIndex].clear();
659             }
660
661             /// Clears all guards in the array
662             void clearAll() CDS_NOEXCEPT
663             {
664                 for ( size_t i = 0; i < capacity(); ++i )
665                     clear(i);
666             }
667         };
668
669         /// Memory manager (Garbage collector)
670         class CDS_EXPORT_API GarbageCollector
671         {
672         private:
673             friend class ThreadGC;
674
675             /// Internal GC statistics
676             struct internal_stat
677             {
678                 atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
679                 atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guard
680
681                 internal_stat()
682                     : m_nGuardCount(0)
683                     , m_nFreeGuardCount(0)
684                 {}
685             };
686
687         public:
688             /// Exception "No GarbageCollector object is created"
689             class not_initialized : public std::runtime_error
690             {
691             public:
692                 //@cond
693                 not_initialized()
694                     : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
695                 {}
696                 //@endcond
697             };
698
699             /// Internal GC statistics
700             struct InternalState
701             {
702                 size_t m_nGuardCount       ;   ///< Total guard count
703                 size_t m_nFreeGuardCount   ;   ///< Count of free guard
704
705                 //@cond
706                 InternalState()
707                     : m_nGuardCount(0)
708                     , m_nFreeGuardCount(0)
709                 {}
710
711                 InternalState& operator =( internal_stat const& s )
712                 {
713                     m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
714                     m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
715
716                     return *this;
717                 }
718                 //@endcond
719             };
720
721         private:
722             static GarbageCollector * m_pManager    ;   ///< GC global instance
723
724             details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
725             details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
726             details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
727
728             atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
729             const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
730
731             internal_stat   m_stat  ;   ///< Internal statistics
732             bool            m_bStatEnabled  ;   ///< Internal Statistics enabled
733
734         public:
735             /// Initializes DHP memory manager singleton
736             /**
737                 This member function creates and initializes DHP global object.
738                 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
739                 this member function is called in the \p main() function. See cds::gc::dhp for example.
740                 After calling of this function you may use CDS data structures based on cds::gc::DHP.
741
742                 \par Parameters
743                 \li \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
744                     the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
745                     If \p nLiberateThreshold <= 1, \p scan() would called after each \ref dhp_gc_retirePtr "retirePtr" call.
746                 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
747                     is initialized the GC allocates local guard pool for the thread from common guard pool.
748                     By perforce the local thread's guard pool is grown automatically from common pool.
749                     When the thread terminated its guard pool is backed to common GC's pool.
750
751             */
752             static void CDS_STDCALL Construct(
753                 size_t nLiberateThreshold = 1024
754                 , size_t nInitialThreadGuardCount = 8
755             );
756
757             /// Destroys DHP memory manager
758             /**
759                 The member function destroys DHP global object. After calling of this function you may \b NOT
760                 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
761                 at the end of your \p main(). See cds::gc::dhp for example.
762             */
763             static void CDS_STDCALL Destruct();
764
765             /// Returns pointer to GarbageCollector instance
766             /**
767                 If DHP GC is not initialized, \p not_initialized exception is thrown
768             */
769             static GarbageCollector&   instance()
770             {
771                 if ( m_pManager == nullptr )
772                     throw not_initialized();
773                 return *m_pManager;
774             }
775
776             /// Checks if global GC object is constructed and may be used
777             static bool isUsed() CDS_NOEXCEPT
778             {
779                 return m_pManager != nullptr;
780             }
781
782         public:
783             //@{
784             /// Internal interface
785
786             /// Allocates a guard
787             details::guard_data * allocGuard()
788             {
789                 return m_GuardPool.alloc();
790             }
791
792             /// Frees guard \p g for reusing in future
793             void freeGuard(details::guard_data * pGuard )
794             {
795                 m_GuardPool.free( pGuard );
796             }
797
798             /// Allocates guard list for a thread.
799             details::guard_data * allocGuardList( size_t nCount )
800             {
801                 return m_GuardPool.allocList( nCount );
802             }
803
804             /// Frees thread's guard list pointed by \p pList
805             void freeGuardList( details::guard_data * pList )
806             {
807                 m_GuardPool.freeList( pList );
808             }
809
810             /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
811             /**@anchor dhp_gc_retirePtr
812             */
813             template <typename T>
814             void retirePtr( T * p, void (* pFunc)(T *) )
815             {
816                 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
817             }
818
819             /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
820             void retirePtr( retired_ptr const& p )
821             {
822                 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
823                     scan();
824             }
825
826         protected:
827             /// Liberate function
828             /** @anchor dhp_gc_liberate
829                 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
830                 trapped by any guard.
831             */
832             void scan();
833             //@}
834
835         public:
836             /// Get internal statistics
837             InternalState& getInternalState(InternalState& stat) const
838             {
839                 return stat = m_stat;
840             }
841
842             /// Checks if internal statistics enabled
843             bool              isStatisticsEnabled() const
844             {
845                 return m_bStatEnabled;
846             }
847
848             /// Enables/disables internal statistics
849             bool  enableStatistics( bool bEnable )
850             {
851                 bool bEnabled = m_bStatEnabled;
852                 m_bStatEnabled = bEnable;
853                 return bEnabled;
854             }
855
856         private:
857             GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
858             ~GarbageCollector();
859         };
860
861         /// Thread GC
862         /**
863             To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
864             that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
865             on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
866             \ref cds_threading "cds::threading::Manager::detachThread()".
867
868             The ThreadGC object maintains two list:
869             \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
870             \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
871             Free guard list is a subset of thread guard list.
872         */
873         class ThreadGC
874         {
875             GarbageCollector&   m_gc    ;   ///< reference to GC singleton
876             details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
877             details::guard_data *    m_pFree ;   ///< The list of free guard from m_pList
878
879         public:
880             /// Default constructor
881             ThreadGC()
882                 : m_gc( GarbageCollector::instance() )
883                 , m_pList( nullptr )
884                 , m_pFree( nullptr )
885             {}
886
887             /// The object is not copy-constructible
888             ThreadGC( ThreadGC const& ) = delete;
889
890             /// Dtor calls fini()
891             ~ThreadGC()
892             {
893                 fini();
894             }
895
896             /// Initialization. Repeat call is available
897             void init()
898             {
899                 if ( !m_pList ) {
900                     m_pList =
901                         m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
902                 }
903             }
904
905             /// Finalization. Repeat call is available
906             void fini()
907             {
908                 if ( m_pList ) {
909                     m_gc.freeGuardList( m_pList );
910                     m_pList =
911                         m_pFree = nullptr;
912                 }
913             }
914
915         public:
916             /// Initializes guard \p g
917             void allocGuard( dhp::details::guard& g )
918             {
919                 assert( m_pList != nullptr );
920                 if ( !g.m_pGuard ) {
921                     if ( m_pFree ) {
922                         g.m_pGuard = m_pFree;
923                         m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
924                     }
925                     else {
926                         g.m_pGuard = m_gc.allocGuard();
927                         g.m_pGuard->pThreadNext = m_pList;
928                         m_pList = g.m_pGuard;
929                     }
930                 }
931             }
932
933             /// Frees guard \p g
934             void freeGuard( dhp::details::guard& g )
935             {
936                 assert( m_pList != nullptr );
937                 if ( g.m_pGuard ) {
938                     g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
939                     g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
940                     m_pFree = g.m_pGuard;
941                     g.m_pGuard = nullptr;
942                 }
943             }
944
945             /// Initializes guard array \p arr
946             template <size_t Count>
947             void allocGuard( GuardArray<Count>& arr )
948             {
949                 assert( m_pList != nullptr );
950                 size_t nCount = 0;
951
952                 while ( m_pFree && nCount < Count ) {
953                     arr[nCount].set_guard( m_pFree );
954                     m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
955                     ++nCount;
956                 }
957
958                 while ( nCount < Count ) {
959                     details::guard& g = arr[nCount++];
960                     g.set_guard( m_gc.allocGuard() );
961                     g.get_guard()->pThreadNext = m_pList;
962                     m_pList = g.get_guard();
963                 }
964             }
965
966             /// Frees guard array \p arr
967             template <size_t Count>
968             void freeGuard( GuardArray<Count>& arr )
969             {
970                 assert( m_pList != nullptr );
971
972                 details::guard_data * pGuard;
973                 for ( size_t i = 0; i < Count - 1; ++i ) {
974                     pGuard = arr[i].get_guard();
975                     pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
976                     pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
977                 }
978                 pGuard = arr[Count-1].get_guard();
979                 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
980                 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
981                 m_pFree = arr[0].get_guard();
982             }
983
984             /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
985             template <typename T>
986             void retirePtr( T * p, void (* pFunc)(T *) )
987             {
988                 m_gc.retirePtr( p, pFunc );
989             }
990
991             /// Run retiring cycle
992             void scan()
993             {
994                 m_gc.scan();
995             }
996         };
997     }   // namespace dhp
998 }}  // namespace cds::gc
999 //@endcond
1000
1001 #if CDS_COMPILER == CDS_COMPILER_MSVC
1002 #   pragma warning(pop)
1003 #endif
1004
1005 #endif // #ifndef CDSLIB_GC_DETAILS_DHP_H