//$$CDS-header$$

#ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
#define __CDS_GC_PTB_PASS_THE_BUCK_H

#include <utility>  // std::pair

#include <cds/cxx11_atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/details/noncopyable.h>

#include <cds/lock/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif
namespace cds { namespace gc {

    /// Pass The Buck reclamation schema
    /**
        \par Sources:
        - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
            dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
        - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
            Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
        - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
            for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005


        The cds::gc::ptb namespace and its members are an internal representation of the Pass-the-Buck GC and should not be used directly.
        Use the cds::gc::PTB class in your code.

        The Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of the PTB schema is
        the GC class and its nested classes. Before using any PTB-related class you must initialize the PTB
        garbage collector by constructing a cds::gc::PTB object at the beginning of your main().
        See the cds::gc::PTB class for an explanation.

        \par Implementation issues
            The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This solution should not introduce a significant performance bottleneck, because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            The guards allocated for a thread are pushed back to the global list only when the thread terminates.
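
        \par Usage sketch
            A minimal, illustrative sketch only; the header names follow the usual libcds layout and are
            assumptions here, and the body of main() is hypothetical:
            \code
            #include <cds/init.h>       // cds::Initialize, cds::Terminate
            #include <cds/gc/ptb.h>     // cds::gc::PTB

            int main()
            {
                cds::Initialize();
                {
                    cds::gc::PTB ptbGC;     // construct the PTB garbage collector singleton

                    // Attach the main thread to the libcds infrastructure
                    cds::threading::Manager::attachThread();

                    // ... use cds::gc::PTB-based containers here ...

                    cds::threading::Manager::detachThread();
                }
                cds::Terminate();
            }
            \endcode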
    */
    namespace ptb {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Pass the Buck algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                retired_ptr_node *  m_pNext     ;   ///< next retired pointer in buffer
                retired_ptr_node *  m_pNextFree ;   ///< next item in free list of retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                typedef void *  guarded_ptr  ;   ///< type of value guarded

                CDS_ATOMIC::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded

#if 0
                typedef cds::SpinLock   handoff_spin ;  ///< type of spin-lock for accessing the \p pHandOff field
                handoff_spin    spinHandOff     ;   ///< access to \p pHandOff field
                handoff_ptr     pHandOff        ;   ///< trapped pointer
#endif

                CDS_ATOMIC::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
                CDS_ATOMIC::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list

                guard_data *             pThreadNext ;   ///< next item of thread's local list of guards

                //@cond
                guard_data()
                    : pPost( nullptr )
#if 0
                    , pHandOff( nullptr )
#endif
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init()
                {
                    pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
                }
                //@endcond

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const
                {
                    return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                CDS_ATOMIC::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                CDS_ATOMIC::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: work with m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap using the guard allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should use block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the list.
                    // m_GuardList is an accumulating list: it never supports concurrent deletion,
                    // so the ABA problem cannot arise for it.
                    details::guard_data * pHead = m_GuardList.load( CDS_ATOMIC::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, CDS_ATOMIC::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator()
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard )
                {
                    pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );

                    cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, CDS_ATOMIC::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by the guard's \p pThreadNext and \p pNextFree fields.

                    cds::gc::ptb::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The guard list allocated is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, CDS_ATOMIC::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guard's \p pThreadNext field.

                    cds::gc::ptb::ThreadGC supporting method
                */
                void freeList( guard_data * pList )
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed );
                        pLast = p;
                    }

                    cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
                    m_FreeGuardList.store( pList, CDS_ATOMIC::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin()
                {
                    return m_GuardList.load(CDS_ATOMIC::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p liberate procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                CDS_ATOMIC::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                CDS_ATOMIC::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                //@cond
                retired_ptr_buffer()
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer()
                {
                    assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
                }
                //@endcond

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node )
                {
                    retired_ptr_node * pHead = m_pHead.load(CDS_ATOMIC::memory_order_acquire);
                    do {
                        node.m_pNext = pHead;
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref ptb_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor ptb_gc_privatize
                */
                privatize_result privatize()
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the liberate function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, CDS_ATOMIC::memory_order_relaxed );
                    return res;
                }

                /// Returns current size of buffer (approximate)
                size_t size() const
                {
                    return m_nItemCount.load(CDS_ATOMIC::memory_order_relaxed);
                }
            };
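
            /*
                Illustration only: a hedged sketch of how the buffer's owner (e.g. GarbageCollector::liberate)
                is expected to drain it. The names "buf" and "process_retired" are hypothetical:

                    retired_ptr_buffer::privatize_result range = buf.privatize();
                    for ( retired_ptr_node * p = range.first; p != nullptr; ) {
                        retired_ptr_node * pNext = p->m_pNext;
                        process_retired( p->m_ptr );    // free the pointer, or re-push it if still guarded
                        p = pNext;
                    }
            */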

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    block *     pNext   ;   ///< next block
                    item        items[m_nItemPerBlock]  ;   ///< item array
                };

                CDS_ATOMIC::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
                CDS_ATOMIC::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
                CDS_ATOMIC::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free items per epoch
                CDS_ATOMIC::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list

                cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator

            private:
                //@cond
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = m_BlockAllocator.New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree = pItem + 1;
                        CDS_STRICT_DO( pItem->m_pNext = nullptr );
                    }

                    // link new block to block list
                    {
                        block * pHead = m_pBlockListHead.load(CDS_ATOMIC::memory_order_acquire);
                        do {
                            pNew->pNext = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
                    }

                    // link block's items to free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(CDS_ATOMIC::memory_order_acquire);
                        do {
                            pLastItem->m_pNextFree = pHead;
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const
                {
                    return m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) & (c_nEpochCount - 1);
                }
                unsigned int next_epoch() const
                {
                    return (m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                }
                //@endcond

            public:
                //@cond
                retired_ptr_pool()
                    : m_pBlockListHead( nullptr )
                    , m_nCurEpoch(0)
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
                        m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(CDS_ATOMIC::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext;
                        m_BlockAllocator.Delete( pBlock );
                    }
                }

                /// Increments current epoch
                void inc_epoch()
                {
                    m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_acq_rel );
                }

                //@endcond

                /// Allocates a new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
                            goto success;
                    }

                    /*
                    item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
                    while ( pItem ) {
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
                            goto success;
                    }
                    */

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( CDS_ATOMIC::memory_order_acquire );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list (pHead, pTail) of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked via the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(CDS_ATOMIC::memory_order_acquire);
                        pTail->m_pNextFree = pCurHead;
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
                }
            };
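
            /*
                Illustration only: a hedged sketch of the intended alloc/free_range protocol;
                "pool", "pHead" and "pTail" are hypothetical names. Nodes are allocated one at a
                time, while liberate returns a whole privatized chain, linked by m_pNextFree:

                    retired_ptr_node& node = pool.alloc( retired_ptr( p, pFunc ));  // on retirePtr
                    ...
                    pool.free_range( pHead, pTail );    // return a chain of processed nodes
                    pool.inc_epoch();                   // advance the epoch so the nodes are not reused too early
            */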

            /// Uninitialized guard
            class guard: public cds::details::noncopyable
            {
                friend class ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data
            public:
                /// Initializes an empty guard.
                guard()
                    : m_pGuard( nullptr )
                {}

                /// Object destructor, does nothing
                ~guard()
                {}

                /// Guards pointer \p p
                void set( void * p )
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release );
                    //CDS_COMPILER_RW_BARRIER;
                }

                /// Clears the guard
                void clear()
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =( T * p )
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    We would not like to declare the m_pGuard member as public.
                    Therefore, we have to add the set_guard/get_guard public functions
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard )
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard()
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const
                {
                    return m_pGuard;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
        */
        class Guard: public details::guard
        {
            //@cond
            typedef details::guard    base_class;
            friend class ThreadGC;
            //@endcond

            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
        public:
            /// Allocates a guard from \p gc GC. \p gc must be the ThreadGC object of the current thread
            Guard(ThreadGC& gc);

            /// Returns the guard allocated back to the pool of free guards
            ~Guard();    // inline after GarbageCollector

            /// Returns PTB GC object
            ThreadGC& getGC()
            {
                return m_gc;
            }

            /// Guards pointer \p p
            template <typename T>
            T * operator =( T * p )
            {
                return base_class::operator =<T>( p );
            }
        };
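
        /*
            Illustration only: a hedged sketch of the Guard usage pattern;
            "MyNode" and "pShared" are hypothetical:

                ThreadGC& gc = ...;     // ThreadGC object of the current thread
                {
                    Guard g( gc );              // allocate a guard from the thread's pool
                    MyNode * p = g = pShared;   // guard the pointer read from a shared location
                    // (a real algorithm re-checks the shared location after guarding)
                    // ... p cannot be reclaimed while the guard holds it ...
                }                               // ~Guard returns the guard to the pool
        */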

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
        */
        template <size_t Count>
        class GuardArray: public cds::details::noncopyable
        {
            details::guard      m_arr[Count]    ;    ///< array of guard
            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc which must be the ThreadGC object of the current thread
            GuardArray( ThreadGC& gc )    ;    // inline below

            /// Returns the guards allocated back to the pool
            ~GuardArray()    ;    // inline below

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns PTB ThreadGC object
            ThreadGC& getGC() CDS_NOEXCEPT
            {
                return m_gc;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex )
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex )
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll()
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };
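
        /*
            Illustration only: a hedged sketch of GuardArray for algorithms that must protect
            several pointers at once, e.g. a list search; "pPrev" and "pCur" are hypothetical:

                GuardArray<2> guards( gc );
                guards.set( 0, pPrev );     // protect the predecessor node
                guards.set( 1, pCur );      // protect the current node
                // ... both nodes are protected during traversal ...
                guards.clearAll();          // release protection; the dtor returns the guards to the pool
        */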

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            //@cond
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                CDS_ATOMIC::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                CDS_ATOMIC::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };
            //@endcond

        public:
            /// Exception "No GarbageCollector object is created"
            CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(CDS_ATOMIC::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
            //CDS_ATOMIC::atomic<size_t>      m_nInLiberate       ;   ///< number of parallel \p liberate function calls

            CDS_ATOMIC::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call liberate
            const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal Statistics enabled

        public:
            /// Initializes PTB memory manager singleton
            /**
                This member function creates and initializes the PTB global object.
                The function should be called before using CDS data structures based on the cds::gc::PTB GC. Usually,
                this member function is called in the \p main() function. See cds::gc::ptb for an example.
                After calling this function you may use CDS data structures based on cds::gc::PTB.

                \par Parameters
                \li \p nLiberateThreshold - the liberate threshold. When the count of retired pointers reaches this value,
                    the \ref ptb_gc_liberate "liberate" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p liberate would be called after each \ref ptb_gc_retirePtr "retirePtr" call.
                \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the local thread's guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.

            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
            );

            /// Destroys PTB memory manager
            /**
                The member function destroys the PTB global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::ptb for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to GarbageCollector instance
            /**
                If the PTB GC is not initialized, a \p PTBManagerEmpty exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw PTBManagerEmpty();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor ptb_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed) )
                    liberate();
            }

        protected:
            /// Liberate function
            /** @anchor ptb_gc_liberate
                The main function of the Pass The Buck algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void liberate();

            //@}

        private:
            //@cond
#if 0
            void liberate( details::liberate_set& set );
#endif
            //@endcond

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            //@cond none
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
            ~GarbageCollector();
            //@endcond
        };

        /// Thread GC
        /**
            To use the Pass The Buck reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the PTB GC. Before terminating a thread linked to the PTB GC it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
        */
        class ThreadGC: public cds::details::noncopyable
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                if ( m_pFree ) {
                    g.m_pGuard = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
                }
                else {
                    g.m_pGuard = m_gc.allocGuard();
                    g.m_pGuard->pThreadNext = m_pList;
                    m_pList = g.m_pGuard;
                }
            }

            /// Frees guard \p g
            void freeGuard( Guard& g )
            {
                assert( m_pList != nullptr );
                g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
                g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
                m_pFree = g.m_pGuard;
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            //@cond
            void scan()
            {
                m_gc.liberate();
            }
            //@endcond

        };
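
        /*
            Illustration only: a hedged sketch of deferred reclamation through ThreadGC;
            "MyNode", "node_cleaner" and "pUnlinked" are hypothetical:

                static void node_cleaner( MyNode * p ) { delete p; }

                // Instead of deleting a node just unlinked from a container, retire it;
                // liberate() frees it once no guard traps the pointer.
                threadGC.retirePtr( pUnlinked, node_cleaner );
        */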

        //////////////////////////////////////////////////////////
        // Inlines

        inline Guard::Guard(ThreadGC& gc)
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        inline Guard::~Guard()
        {
            getGC().freeGuard( *this );
        }

        template <size_t Count>
        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
            : m_gc( gc )
        {
            getGC().allocGuard( *this );
        }
        template <size_t Count>
        inline GuardArray<Count>::~GuardArray()
        {
            getGC().freeGuard( *this );
        }

    }   // namespace ptb
}}  // namespace cds::gc

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif


#endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H