//$$CDS-header$$

#ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
#define __CDS_GC_PTB_PASS_THE_BUCK_H

#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/details/noncopyable.h>

#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

namespace cds { namespace gc {

    /// Pass The Buck reclamation schema
    /**
        \par Sources:
        - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
            dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
        - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
            Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
        - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
            for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005

        The cds::gc::ptb namespace and its members are the internal representation of the Pass-the-Buck GC and should not be used directly.
        Use the cds::gc::PTB class in your code.

        The Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of the PTB schema is
        the GC class and its nested classes. Before using any PTB-related class you must initialize the PTB garbage collector
        by constructing a cds::gc::PTB object at the beginning of your main().
        See the cds::gc::PTB class for explanation.
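
        A minimal initialization sketch (a hedged example; it assumes the usual libcds entry points
        \p cds::Initialize()/cds::Terminate() and the \ref cds_threading "cds::threading::Manager" API):
        \code
        int main()
        {
            cds::Initialize();  // initialize library internals
            {
                cds::gc::PTB ptbGC;     // construct the PTB singleton

                // attach the main thread to the PTB GC
                cds::threading::Manager::attachThread();

                // ... use PTB-based containers here ...

                cds::threading::Manager::detachThread();
            }
            cds::Terminate();   // free library internals
            return 0;
        }
        \endcode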

        \par Implementation issues
            The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This should not introduce a significant performance bottleneck: each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            The guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
    namespace ptb {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Pass the Buck algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                typedef void *  guarded_ptr  ;   ///< type of value guarded

                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded

#if 0
                typedef cds::SpinLock   handoff_spin ;  ///< type of spin-lock for accessing to \p pHandOff field
                handoff_spin    spinHandOff     ;   ///< access to \p pHandOff field
                handoff_ptr     pHandOff        ;   ///< trapped pointer
#endif

                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list

                guard_data *             pThreadNext ;   ///< next item of thread's local list of guards

                //@cond
                guard_data()
                    : pPost( nullptr )
#if 0
                    , pHandOff( nullptr )
#endif
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init()
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }
                //@endcond

                /// Checks if the guard is free, i.e. it does not guard any pointer
                bool isFree() const
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: work with m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap using the guard allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should make block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard to the list.
                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
                    // so the ABA problem is impossible for it
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator()
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from free list or from heap if free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from free-list
                    details::guard_data * pGuard;

                    {
                        cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into free-list
                */
                void free( guard_data * pGuard )
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.

                    cds::gc::ptb::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees list of guards
                /**
                    The list \p pList is linked by guard's \p pThreadNext field.

                    cds::gc::ptb::ThreadGC supporting method
                */
                void freeList( guard_data * pList )
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin()
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the size of the buffer exceeds a threshold, the GC calls the \p liberate procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                //@cond
                retired_ptr_buffer()
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer()
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }
                //@endcond

                /// Pushes new node into the buffer. Returns current buffer size
                size_t push( retired_ptr_node& node )
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref ptb_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor ptb_gc_privatize
                */
                privatize_result privatize()
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // Item counter is needed only as a threshold for liberate function
                    // So, we may clear the item counter without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns current size of buffer (approximate)
                size_t size() const
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
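                // Note: alloc() pops only from the free list of the current epoch slot,
                // while free_range() pushes onto the slot of the previous epoch (see
                // next_epoch()). A freed item becomes allocatable again only after the
                // epoch counter (advanced by inc_epoch(), presumably from liberate())
                // wraps around to its slot; the delayed reuse makes free-list ABA unlikely.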
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free item per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                //@cond
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link new block to block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }

                    // link block's items to free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                }
                unsigned int next_epoch() const
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }
                //@endcond

            public:
                //@cond
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments current epoch
                void inc_epoch()
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                //@endcond

                /// Allocates new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }

                    /*
                    item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                    while ( pItem ) {
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                            goto success;
                    }
                    */

                    // Epoch free list is empty
                    // Alloc from global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (pHead, pTail) of retired pointers into the pool (frees retired pointers)
                /**
                    The list is linked via the \p m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };

            /// Uninitialized guard
            class guard: public cds::details::noncopyable
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data
            public:
                /// Initialize empty guard.
                guard()
                    : m_pGuard( nullptr )
                {}

                /// Object destructor, does nothing
                ~guard()
                {}

                /// Guards pointer \p p
                void set( void * p )
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, atomics::memory_order_release );
                    //CDS_COMPILER_RW_BARRIER;
                }

                /// Clears the guard
                void clear()
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =( T * p )
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                //@cond
                std::nullptr_t operator=(std::nullptr_t)
                {
                    clear();
                    return nullptr;
                }
                //@endcond

            public: // for ThreadGC.
                /*
                    GCC cannot compile the code for the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard' is protected",
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    We do not want to declare the m_pGuard member as public.
                    Therefore, we have to add the set_guard/get_guard public functions.
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard )
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard()
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const
                {
                    return m_pGuard;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
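
            A usage sketch (a hedged example; \p Node and \p pShared are illustrative names,
            and \p gc is the ThreadGC object of the current thread):
            \code
            Guard g( gc );

            // Typical protect loop: post the pointer to the guard, then re-check
            // that the shared location still points to the guarded object
            Node * p;
            do {
                p = pShared.load( atomics::memory_order_acquire );
                g = p;
            } while ( p != pShared.load( atomics::memory_order_acquire ));

            // here p is protected: liberate() will not free it while the guard holds it
            \endcode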
        */
        class Guard: public details::guard
        {
            //@cond
            typedef details::guard    base_class;
            friend class ThreadGC;
            //@endcond

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
            Guard(ThreadGC& gc);

            /// Returns the guard allocated back to the pool of free guards
            ~Guard();    // inline after GarbageCollector

            /// Returns PTB GC object
            ThreadGC& getGC()
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =( T * p )
            {
                return base_class::operator =<T>( p );
            }

            //@cond
            std::nullptr_t operator=(std::nullptr_t)
            {
                return base_class::operator =(nullptr);
            }
            //@endcond
        };

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
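
            A usage sketch (a hedged example; \p pFirst, \p pSecond and \p gc are illustrative names):
            \code
            GuardArray<2> ga( gc );
            ga.set( 0, pFirst );    // protect pFirst in slot 0
            ga.set( 1, pSecond );   // protect pSecond in slot 1
            // ... both pointers are protected here ...
            ga.clearAll();          // release the slots; the dtor returns the guards to the pool
            \endcode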
        */
        template <size_t Count>
        class GuardArray: public cds::details::noncopyable
        {
            details::guard      m_arr[Count]    ;    ///< array of guard
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
            GuardArray( ThreadGC& gc )    ;    // inline below

            /// Returns the allocated guards back to the pool
            ~GuardArray()    ;    // inline below

            /// Returns the capacity of array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns PTB ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex )
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll()
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            //@cond
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guard

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };
            //@endcond

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guard

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
            //atomics::atomic<size_t>      m_nInLiberate       ;   ///< number of parallel \p liberate function calls

            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call liberate
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal Statistics enabled

        public:
            /// Initializes the PTB memory manager singleton
            /**
                This member function creates and initializes the PTB global object.
                The function should be called before using CDS data structures based on the cds::gc::PTB GC. Usually,
                this member function is called in the \p main() function. See cds::gc::ptb for an example.
                After calling this function you may use CDS data structures based on cds::gc::PTB.

                \par Parameters
                \li \p nLiberateThreshold - the liberate threshold. When the count of retired pointers reaches this value,
                    the \ref ptb_gc_liberate "liberate" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p liberate is called after each \ref ptb_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.

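                A call sketch (the arguments shown are the declared defaults):
                \code
                cds::gc::ptb::GarbageCollector::Construct( 1024, 8 );
                \endcode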
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys PTB memory manager
            /**
                The member function destroys the PTB global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::ptb for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to GarbageCollector instance
            /**
                If PTB GC is not initialized, \p PTBManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw PTBManagerEmpty();
                return *m_pManager;
            }

            /// Checks if global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for future reuse
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees thread's guard list pointed by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor ptb_gc_retirePtr
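
                A usage sketch (a hedged example; \p Foo and \p fooDeleter are illustrative names):
                \code
                struct Foo { int nData; };
                void fooDeleter( Foo * p ) { delete p; }

                // defer deletion of p until no guard protects it
                cds::gc::ptb::GarbageCollector::instance().retirePtr( p, fooDeleter );
                \endcode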
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    liberate();
            }

        protected:
            /// Liberate function
            /** @anchor ptb_gc_liberate
                The main function of Pass The Buck algorithm. It tries to free retired pointers if they are not
                trapped by any guard.
            */
            void liberate();

            //@}

        private:
            //@cond
#if 0
            void liberate( details::liberate_set& set );
#endif
            //@endcond

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            //@cond none
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
            //@endcond
        };

        /// Thread GC
        /**
            To use the Pass The Buck reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the PTB GC. Before a thread linked to the PTB GC terminates, it must call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)

            The free guard list is a subset of the thread guard list.
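
            A per-thread sketch (hedged; it assumes the default \ref cds_threading "threading manager"):
            \code
            void worker()
            {
                // creates and initializes the ThreadGC object of this thread
                cds::threading::Manager::attachThread();

                // ... use PTB-based containers ...

                // returns the thread's guards to the global pool
                cds::threading::Manager::detachThread();
            }
            \endcode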
        */
        class ThreadGC: public cds::details::noncopyable
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guard from m_pList

        public:
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                if ( m_pFree ) {
                    g.m_pGuard = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                }
                else {
                    g.m_pGuard = m_gc.allocGuard();
                    g.m_pGuard->pThreadNext = m_pList;
                    m_pList = g.m_pGuard;
                }
            }

            /// Frees guard \p g
            void freeGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = g.m_pGuard;
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            //@cond
            void scan()
            {
                m_gc.liberate();
            }
            //@endcond

        };

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace ptb
}}  // namespace cds::gc

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif


#endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H