//$$CDS-header$$

#ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
#define __CDS_GC_PTB_PASS_THE_BUCK_H

#include <mutex>        // unique_lock
#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

namespace cds { namespace gc {

    /// Pass The Buck reclamation schema
    /**
        \par Sources:
        - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
            dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
        - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
            Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
        - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
            for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005

        The cds::gc::ptb namespace and its members are the internal implementation of the Pass-the-Buck GC
        and should not be used directly. Use the cds::gc::PTB class in your code.

        The Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of the PTB schema
        is the GC class and its nested classes. Before using any PTB-related class you must initialize the PTB
        garbage collector by constructing a cds::gc::PTB object at the beginning of your main().
        See the cds::gc::PTB class for details.

        \par Implementation issues
            The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by a spin-lock
            (i.e. serialized). This solution should not introduce a significant performance bottleneck, because
            each thread has its own set of guards allocated from the global list, and the global list is accessed
            only when all of the thread's guards are busy. In that case the thread allocates the next block of
            guards from the global list. The guards allocated to a thread are pushed back to the global list
            only when the thread terminates.
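
        \par Example
        A minimal initialization sketch (assuming the cds::Initialize / cds::Terminate functions
        and the \ref cds_threading "cds::threading::Manager" facility described in the library
        documentation; the public header name for cds::gc::PTB is an assumption here):
        \code
        #include <cds/init.h>
        #include <cds/gc/ptb.h>     // assumed header exposing cds::gc::PTB

        int main()
        {
            cds::Initialize();
            {
                // Construct the PTB garbage collector singleton
                cds::gc::PTB ptbGC;

                // Attach the main thread to the libcds infrastructure
                cds::threading::Manager::attachThread();

                // ... use PTB-based containers here ...

                cds::threading::Manager::detachThread();
            }
            cds::Terminate();
        }
        \endcode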
    */
    namespace ptb {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Pass the Buck algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                typedef void *  guarded_ptr  ;   ///< type of value guarded

                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded

#if 0
                typedef cds::SpinLock   handoff_spin ;  ///< type of spin-lock for accessing to \p pHandOff field
                handoff_spin    spinHandOff     ;   ///< access to \p pHandOff field
                handoff_ptr     pHandOff        ;   ///< trapped pointer
#endif

                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list

                guard_data *             pThreadNext ;   ///< next item of thread's local list of guards

                //@cond
                guard_data()
                    : pPost( nullptr )
#if 0
                    , pHandOff( nullptr )
#endif
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init()
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }
                //@endcond

                /// Checks if the guard is free, i.e. it does not guard any pointer
                bool isFree() const
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based:
                    lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap using the guard allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should use block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the global list.
                    // m_GuardList is an accumulating list: guards are never removed from it,
                    // so the ABA problem cannot occur here.
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator()
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard )
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The returned list is linked by the guard's \p pThreadNext and \p pNextFree fields.

                    Supporting method for cds::gc::ptb::ThreadGC
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the calling thread,
                    // so relaxed memory order is sufficient here
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guard's \p pThreadNext field.

                    Supporting method for cds::gc::ptb::ThreadGC
                */
                void freeList( guard_data * pList )
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin()
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                A buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p liberate
                procedure to free the retired nodes.
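
                A sketch of how the GC drives the buffer (cf. GarbageCollector::retirePtr below;
                \p nThreshold stands for the GC's liberate threshold):
                \code
                // push() returns the new (approximate) buffer size; when it reaches
                // the threshold, the GC runs a liberate pass
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc( p )) >= nThreshold )
                    liberate();
                \endcode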
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                //@cond
                retired_ptr_buffer()
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer()
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }
                //@endcond

                /// Pushes a new node into the buffer. Returns the resulting buffer size
                size_t push( retired_ptr_node& node )
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref ptb_gc_privatve "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the buffer
                /**@anchor ptb_gc_privatve
                */
                privatize_result privatize()
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the liberate function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Free nodes are linked into lock-free free-lists.
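
                A sketch of the epoch discipline implemented below (an assumption about how the
                \p liberate implementation drives the pool): \p free_range() pushes nodes onto
                the previous-epoch free list while \p alloc() pops from the current-epoch list,
                so a node freed in the current pass cannot be immediately re-allocated, which
                avoids the ABA problem on the free-list heads.
                \code
                pool.inc_epoch();                       // rotate epochs before a pass
                retired_ptr_node& node = pool.alloc();  // pops from the current epoch list
                // ...
                pool.free_range( pHead, pTail );        // pushed onto the previous epoch list
                \endcode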
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To avoid the ABA problem we use an epoch-based approach
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                //@cond
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link new block to block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link block's items to free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }
                unsigned int next_epoch() const
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }
                //@endcond

            public:
                //@cond
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments current epoch
                void inc_epoch()
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                //@endcond

                /// Allocates new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    // Epoch free list is empty
                    // Alloc from global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (pHead, pTail) of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked via the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };

            /// Uninitialized guard
            class guard
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data
            public:
                /// Initialize empty guard.
                guard()
                    : m_pGuard( nullptr )
                {}

                /// The object is not copy-constructible
                guard( guard const& ) = delete;

                /// Object destructor, does nothing
                ~guard()
                {}

                /// Guards pointer \p p
                void set( void * p )
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, atomics::memory_order_release );
                    //CDS_COMPILER_RW_BARRIER;
                }

                /// Clears the guard
                void clear()
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =( T * p )
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                //@cond
                std::nullptr_t operator=(std::nullptr_t)
                {
                    clear();
                    return nullptr;
                }
                //@endcond

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    We would not like to declare the m_pGuard member public.
                    Therefore, we add the public set_guard/get_guard functions.
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard )
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard()
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const
                {
                    return m_pGuard;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            and the dtor returns the guard back to the pool of free guards.
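
            Typical protect-and-validate usage (a sketch; \p threadGC and the atomic
            \p head pointer are hypothetical):
            \code
            Guard g( threadGC );
            Foo * p;
            do {
                p = head.load( atomics::memory_order_acquire );
                g = p;      // post the guard
            } while ( p != head.load( atomics::memory_order_acquire ));
            // p cannot be reclaimed by liberate() while the guard is posted
            \endcode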
        */
        class Guard: public details::guard
        {
            //@cond
            typedef details::guard    base_class;
            friend class ThreadGC;
            //@endcond

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc GC. \p gc must be the ThreadGC object of the current thread
            Guard(ThreadGC& gc);

            /// Returns the guard allocated back to the pool of free guards
            ~Guard();    // inline after GarbageCollector

            /// Returns PTB GC object
            ThreadGC& getGC()
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =( T * p )
            {
                return base_class::operator =<T>( p );
            }

            //@cond
            std::nullptr_t operator=(std::nullptr_t)
            {
                return base_class::operator =(nullptr);
            }
            //@endcond
        };

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards
            from the guard pool, and the dtor returns the allocated guards back to the pool.
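
            A sketch of guarding two pointers during a list traversal (\p pPred and \p pCur
            are hypothetical node pointers):
            \code
            GuardArray<2> guards( threadGC );
            guards.set( 0, pPred );   // protect the predecessor node
            guards.set( 1, pCur );    // protect the current node
            // ... both nodes are now safe to access ...
            guards.clearAll();        // optional: the dtor releases the guards anyway
            \endcode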
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc which must be the ThreadGC object of the current thread
            GuardArray( ThreadGC& gc )    ;    // inline below

            /// The object is not default-constructible
            GuardArray() = delete;

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// Returns the guards allocated back to the pool
            ~GuardArray()    ;    // inline below

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns PTB ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex )
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll()
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            //@cond
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };
            //@endcond

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
            //atomics::atomic<size_t>      m_nInLiberate       ;   ///< number of parallel \p liberate function calls

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call liberate
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the PTB memory manager singleton
            /**
                This member function creates and initializes the PTB global object.
                The function should be called before using CDS data structures based on the cds::gc::PTB GC.
                Usually, this member function is called in the \p main() function. See cds::gc::ptb for an example.
                After calling this function you may use CDS data structures based on cds::gc::PTB.

                \par Parameters
                \li \p nLiberateThreshold - the liberate threshold. When the count of retired pointers reaches this value,
                    the \ref ptb_gc_liberate "liberate" member function is called to free the retired pointers.
                    If \p nLiberateThreshold <= 1, \p liberate is called after each \ref ptb_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates the thread's local guard pool from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the GC's common pool.

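                \par Example
                A sketch of direct use of this internal API (user code normally constructs
                a cds::gc::PTB object instead, which calls these functions):
                \code
                cds::gc::ptb::GarbageCollector::Construct( 1024, 8 );
                // ... threads attach, work with PTB-based containers, detach ...
                cds::gc::ptb::GarbageCollector::Destruct();
                \endcode
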
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys the PTB memory manager
            /**
                The member function destroys the PTB global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::ptb for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to GarbageCollector instance
            /**
                If the PTB GC is not initialized, the \p PTBManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw PTBManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard( details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the buffer of retired pointers for deferred reclamation
            /**@anchor ptb_gc_retirePtr
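                A sketch (\p Foo and \p fooDisposer are hypothetical):
                \code
                void fooDisposer( Foo * p ) { delete p; }
                // ...
                // defer reclamation of p until no guard protects it
                GarbageCollector::instance().retirePtr( p, fooDisposer );
                \endcode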
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the buffer of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    liberate();
            }

        protected:
            /// Liberate function
            /** @anchor ptb_gc_liberate
                The core function of the Pass-the-Buck algorithm. It tries to free the retired pointers
                that are not trapped by any guard.
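
                A high-level outline of one pass (the definition lives in the .cpp file;
                this outline is an assumption based on the data structures above):
                \code
                // 1. privatize the retired-pointer buffer (m_RetiredBuffer.privatize())
                // 2. advance the epoch of the retired-pointer pool
                // 3. scan all allocated guards (the m_GuardPool.begin() list) and
                //    collect the pointers posted in their pPost fields
                // 4. for each privatized retired pointer:
                //      if it is not posted in any guard, invoke its deleter;
                //      otherwise push it back to the buffer for a later pass
                \endcode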
            */
            void liberate();

            //@}

        private:
            //@cond
#if 0
            void liberate( details::liberate_set& set );
#endif
            //@endcond

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics. Returns the previous state
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            //@cond none
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
            //@endcond
        };

        /// Thread GC
        /**
            To use the Pass-the-Buck reclamation schema, each thread must be linked with a ThreadGC object
            that interacts with the global GarbageCollector object. The linkage is performed by calling
            \ref cds_threading "cds::threading::Manager::attachThread()" at the start of each thread that
            uses the PTB GC. Before a thread linked to the PTB GC terminates, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)

            The free guard list is a subset of the thread guard list.
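
            A typical per-thread pattern (a sketch using the threading manager cited above):
            \code
            void thread_func()
            {
                // creates and initializes the thread's ThreadGC object
                cds::threading::Manager::attachThread();

                // ... work with PTB-based containers ...

                // returns the thread's guards to the global pool
                cds::threading::Manager::detachThread();
            }
            \endcode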
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                if ( m_pFree ) {
                    g.m_pGuard = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                }
                else {
                    g.m_pGuard = m_gc.allocGuard();
                    g.m_pGuard->pThreadNext = m_pList;
                    m_pList = g.m_pGuard;
                }
            }

            /// Frees guard \p g
            void freeGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = g.m_pGuard;
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            //@cond
            void scan()
            {
                m_gc.liberate();
            }
            //@endcond

        };

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace ptb
}}  // namespace cds::gc

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif


#endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H