Improve cds::gc::DHP scan() performance
cds/gc/details/dhp.h
//$$CDS-header$$

#ifndef __CDS_GC_DETAILS_DHP_H
#define __CDS_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/algo/atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for an explanation.

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This should not introduce a significant performance bottleneck: each thread has its own set
            of guards allocated from the global free list, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            The guards allocated for a thread are pushed back to the global list only when the thread terminates.
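
        \par Example
        A minimal initialization sketch using the user-level cds::gc::DHP wrapper
        (illustrative only; see the cds::gc::DHP documentation for authoritative usage):
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();      // initialize libcds internals
            {
                cds::gc::DHP dhpGC; // construct the DHP singleton for the whole program
                cds::threading::Manager::attachThread();    // attach main() thread to libcds

                // ... work with DHP-based containers ...
            }   // the DHP singleton is destroyed here
            cds::Terminate();       // finalize libcds
            return 0;
        }
        \endcode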
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr       ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list

                guard_data * pThreadNext; ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: work with m_FreeGuardList in a lock-free manner.
                */
108
109             private:
110                 /// Allocates new guard from the heap. The function uses aligned allocator
111                 guard_data * allocNew()
112                 {
113                     //TODO: the allocator should make block allocation
114
115                     details::guard_data * pGuard = m_GuardAllocator.New();
116
117                     // Link guard to the list
118                     // m_GuardList is an accumulating list and it cannot support concurrent deletion,
119                     // so, ABA problem is impossible for it
120                     details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
121                     do {
122                         pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
123                         // pHead is changed by compare_exchange_weak
124                     } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
125
126                     pGuard->init();
127                     return pGuard;
128                 }
129
130             public:
131                 // Default ctor
132                 guard_allocator() CDS_NOEXCEPT
133                     : m_GuardList( nullptr )
134                     , m_FreeGuardList( nullptr )
135                 {}
136
137                 // Destructor
138                 ~guard_allocator()
139                 {
140                     guard_data * pNext;
141                     for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
142                         pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
143                         m_GuardAllocator.Delete( pData );
144                     }
145                 }
146
147                 /// Allocates a guard from free list or from heap if free list is empty
148                 guard_data * alloc()
149                 {
150                     // Try to pop a guard from free-list
151                     details::guard_data * pGuard;
152
153                     {
154                         std::unique_lock<SpinLock> al( m_freeListLock );
155                         pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
156                         if ( pGuard )
157                             m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
158                     }
159                     if ( !pGuard )
160                         return allocNew();
161
162                     pGuard->init();
163                     return pGuard;
164                 }
165
166                 /// Frees guard \p pGuard
167                 /**
168                     The function places the guard \p pGuard into free-list
169                 */
170                 void free( guard_data * pGuard ) CDS_NOEXCEPT
171                 {
172                     pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
173
174                     std::unique_lock<SpinLock> al( m_freeListLock );
175                     pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
176                     m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
177                 }
178
179                 /// Allocates list of guard
180                 /**
181                     The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
182
183                     cds::gc::dhp::ThreadGC supporting method
184                 */
185                 guard_data * allocList( size_t nCount )
186                 {
187                     assert( nCount != 0 );
188
189                     guard_data * pHead;
190                     guard_data * pLast;
191
192                     pHead =
193                         pLast = alloc();
194
195                     // The guard list allocated is private for the thread,
196                     // so, we can use relaxed memory order
197                     while ( --nCount ) {
198                         guard_data * p = alloc();
199                         pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
200                         pLast = p;
201                     }
202
203                     pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
204
205                     return pHead;
206                 }
207
208                 /// Frees list of guards
209                 /**
210                     The list \p pList is linked by guard's \p pThreadNext field.
211
212                     cds::gc::dhp::ThreadGC supporting method
213                 */
214                 void freeList( guard_data * pList ) CDS_NOEXCEPT
215                 {
216                     assert( pList != nullptr );
217
218                     guard_data * pLast = pList;
219                     while ( pLast->pThreadNext ) {
220                         pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
221                         guard_data * p;
222                         pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
223                         pLast = p;
224                     }
225
226                     std::unique_lock<SpinLock> al( m_freeListLock );
227                     pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
228                     m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
229                 }
230
231                 /// Returns the list's head of guards allocated
232                 guard_data * begin() CDS_NOEXCEPT
233                 {
234                     return m_GuardList.load(atomics::memory_order_acquire);
235                 }
236             };
237
238             /// Retired pointer buffer
239             /**
240                 The buffer of retired nodes ready for liberating.
241                 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
242                 retired nodes.
243             */
244             class retired_ptr_buffer
245             {
246                 atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
247                 atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count
248
249             public:
250                 retired_ptr_buffer() CDS_NOEXCEPT
251                     : m_pHead( nullptr )
252                     , m_nItemCount(0)
253                 {}
254
255                 ~retired_ptr_buffer() CDS_NOEXCEPT
256                 {
257                     assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
258                 }
259
260                 /// Pushes new node into the buffer. Returns current buffer size
261                 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
262                 {
263                     retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
264                     do {
265                         node.m_pNext = pHead;
266                         // pHead is changed by compare_exchange_weak
267                     } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
268
269                     return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
270                 }
271
272                 /// Pushes [pFirst, pLast] list linked by pNext field.
273                 size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
274                 {
275                     assert( pFirst );
276                     assert( pLast );
277
278                     retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
279                     do {
280                         pLast->m_pNext = pHead;
281                         // pHead is changed by compare_exchange_weak
282                     } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
283
284                     return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
285                 }
286
287                 /// Result of \ref dhp_gc_privatve "privatize" function.
288                 /**
289                     The \p privatize function returns retired node list as \p first and the size of that list as \p second.
290                 */
291                 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
292
293                 /// Gets current list of retired pointer and clears the list
294                 /**@anchor dhp_gc_privatve
295                 */
296                 privatize_result privatize() CDS_NOEXCEPT
297                 {
298                     privatize_result res;
299                     res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
300
301                     // Item counter is needed only as a threshold for \p scan() function
302                     // So, we may clear the item counter without synchronization with m_pHead
303                     res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
304                     return res;
305                 }
306
307                 /// Returns current size of buffer (approximate)
308                 size_t size() const CDS_NOEXCEPT
309                 {
310                     return m_nItemCount.load(atomics::memory_order_relaxed);
311                 }
312             };
313
314             /// Pool of retired pointers
315             /**
316                 The class acts as an allocator of retired node.
317                 Retired pointers are linked in the lock-free list.
318             */
319             template <class Alloc = CDS_DEFAULT_ALLOCATOR>
320             class retired_ptr_pool {
321                 /// Pool item
322                 typedef retired_ptr_node    item;
323
324                 /// Count of items in block
325                 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
326
327                 /// Pool block
328                 struct block {
329                     block *     pNext;  ///< next block
330                     item        items[m_nItemPerBlock];   ///< item array
331                 };
332
333                 atomics::atomic<block *> m_pBlockListHead;   ///< head of of allocated block list
334
335                 // To solve ABA problem we use epoch-based approach
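                // Informal sketch of how the epochs work:
                //  - free_range() pushes freed items onto the list of next_epoch(),
                //    the epoch just "behind" the current one;
                //  - alloc() pops only from the current_epoch() list;
                //  - inc_epoch() (called once per scan() pass) advances the current epoch.
                // An item freed now is therefore not handed out again until the epoch
                // counter has advanced past it, which leaves concurrent alloc() calls
                // enough time to complete and closes the classic ABA window.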
                static const unsigned int c_nEpochCount = 4;    ///< Max epoch count
                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount];   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }

            public:
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list [pHead, pTail] of retired pointers into the pool (frees retired pointers)
                /**
                    The list is linked by the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
467
468             /// Uninitialized guard
469             class guard
470             {
471                 friend class dhp::ThreadGC;
472             protected:
473                 details::guard_data * m_pGuard ;    ///< Pointer to guard data
474
475             public:
476                 /// Initialize empty guard.
477                 CDS_CONSTEXPR guard() CDS_NOEXCEPT
478                     : m_pGuard( nullptr )
479                 {}
480
481                 /// Ã‘opy-ctor is disabled
482                 guard( guard const& ) = delete;
483
484                 /// Move-ctor is disabled
485                 guard( guard&& ) = delete;
486
487                 /// Object destructor, does nothing
488                 ~guard() CDS_NOEXCEPT
489                 {}
490
491                 /// Get current guarded pointer
492                 void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
493                 {
494                     assert( m_pGuard != nullptr );
495                     return m_pGuard->pPost.load( order );
496                 }
497
498                 /// Guards pointer \p p
499                 void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
500                 {
501                     assert( m_pGuard != nullptr );
502                     m_pGuard->pPost.store( p, order );
503                 }
504
505                 /// Clears the guard
506                 void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
507                 {
508                     assert( m_pGuard != nullptr );
509                     m_pGuard->pPost.store( nullptr, order );
510                 }
511
512                 /// Guards pointer \p p
513                 template <typename T>
514                 T * operator =(T * p) CDS_NOEXCEPT
515                 {
516                     set( reinterpret_cast<void *>( const_cast<T *>(p) ));
517                     return p;
518                 }
519
520                 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
521                 {
522                     clear();
523                     return nullptr;
524                 }
525
526             public: // for ThreadGC.
527                 /*
528                     GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
529                     the compiler produces error: \91cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard\92 is protected
530                     despite the fact that ThreadGC is declared as friend for guard class.
531                     Therefore, we have to add set_guard/get_guard public functions
532                 */
533                 /// Set guard data
534                 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
535                 {
536                     assert( m_pGuard == nullptr );
537                     m_pGuard = pGuard;
538                 }
539
540                 /// Get current guard data
541                 details::guard_data * get_guard() CDS_NOEXCEPT
542                 {
543                     return m_pGuard;
544                 }
545                 /// Get current guard data
546                 details::guard_data * get_guard() const CDS_NOEXCEPT
547                 {
548                     return m_pGuard;
549                 }
550
551                 details::guard_data * release_guard() CDS_NOEXCEPT
552                 {
553                     details::guard_data * p = m_pGuard;
554                     m_pGuard = nullptr;
555                     return p;
556                 }
557
558                 bool is_initialized() const
559                 {
560                     return m_pGuard != nullptr;
561                 }
562             };
563
564         } // namespace details
565
566         /// Guard
567         /**
568             This class represents auto guard: ctor allocates a guard from guard pool,
569             dtor returns the guard back to the pool of free guard.
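
            \par Example
            A typical protect-loop sketch (the names \p atomicPtr and \p p are illustrative,
            not part of this API; the current thread must be attached to DHP):
            \code
            dhp::Guard g;
            void * p;
            do {
                p = atomicPtr.load( atomics::memory_order_acquire );
                g = p;      // publish the hazard
                // re-read to be sure the pointer did not change after publication
            } while ( p != atomicPtr.load( atomics::memory_order_acquire ));
            // p may now be dereferenced safely until g is cleared or destroyed
            \endcode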
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;
        public:
            /// Allocates a guard from the ThreadGC object of the current thread
            Guard(); // inline in dhp_impl.h

            /// Returns the allocated guard back to the pool of free guards
            ~Guard();    // inline in dhp_impl.h

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
        };

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
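
            \par Example
            A short usage sketch (the pointers \p pFirst and \p pSecond are illustrative):
            \code
            dhp::GuardArray<2> ga;  // allocates two guards from the thread's pool
            ga.set( 0, pFirst );    // guard two pointers at once
            ga.set( 1, pSecond );
            // ... dereference pFirst/pSecond safely ...
            ga.clearAll();          // release the hazards; the dtor returns the guards
            \endcode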
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for another size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates the array of guards from the ThreadGC object of the current thread
            GuardArray();    // inline in dhp_impl.h

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// The object is not move-constructible
            GuardArray( GuardArray&& ) = delete;

            /// Returns the allocated guards back to the pool
            ~GuardArray();    // inline in dhp_impl.h

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            class not_initialized : public std::runtime_error
            {
            public:
                //@cond
                not_initialized()
                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
                {}
                //@endcond
            };

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for a ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                \li \p nLiberateThreshold - the \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
                    is initialized the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.

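                \par Example
                A sketch of explicit construction/destruction (normally performed for you
                by the user-level cds::gc::DHP wrapper object):
                \code
                cds::gc::dhp::GarbageCollector::Construct( 1024, 8 );
                // ... threads work with DHP-based containers ...
                cds::gc::dhp::GarbageCollector::Destruct();
                \endcode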
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, the \p not_initialized exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw not_initialized();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
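
                A sketch of a typical call (the type \p Foo and the disposer are illustrative):
                \code
                struct Foo { int nData; };
                void fooDisposer( Foo * p ) { delete p; }

                // instead of an immediate "delete pFoo" - defer the reclamation:
                cds::gc::dhp::GarbageCollector::instance().retirePtr( pFoo, fooDisposer );
                \endcode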
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
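
                Informal sketch of the algorithm (the implementation lives in dhp.cpp):
                \code
                privatize the retired-pointer buffer          // grab all retired nodes at once
                build a liberate_set from the privatized list // keyed by the retired pointer value
                for each allocated guard g in m_GuardPool:    // walk the global guard list
                    exclude g->pPost from the liberate_set    // still-guarded pointers survive
                free every node left in the liberate_set      // invoke each node's disposer
                push the surviving nodes back to the buffer   // they are retried on the next scan()
                inc_epoch() on the retired-pointer pool       // advance the epoch for ABA safety
                \endcode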
            */
            void scan();
            //@}

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)

            The free guard list is a subset of the thread guard list; the attach/detach protocol is sketched below.
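
            \par Example
            A sketch of the per-thread attach/detach protocol (usually hidden behind
            cds::threading::Manager):
            \code
            void thread_func()
            {
                cds::threading::Manager::attachThread();   // creates and links the ThreadGC

                // ... work with DHP-based containers ...

                cds::threading::Manager::detachThread();   // returns the guards to the GC
            }
            \endcode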
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to the GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( !g.m_pGuard ) {
                    if ( m_pFree ) {
                        // reuse a guard from the thread-local free list
                        g.m_pGuard = m_pFree;
                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    }
                    else {
                        // the local pool is exhausted - take a guard from the global pool
                        g.m_pGuard = m_gc.allocGuard();
                        g.m_pGuard->pThreadNext = m_pList;
                        m_pList = g.m_pGuard;
                    }
                }
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( g.m_pGuard ) {
                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g.m_pGuard;
                    g.m_pGuard = nullptr;
                }
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            void scan()
            {
                m_gc.scan();
            }
        };
    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef __CDS_GC_DETAILS_DHP_H