3 #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
4 #define __CDS_GC_PTB_PASS_THE_BUCK_H
6 #include <mutex> // unique_lock
7 #include <cds/cxx11_atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/lock/spinlock.h>
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 # pragma warning(push)
15 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
18 namespace cds { namespace gc {
20 /// Pass The Buck reclamation schema
23 - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
24 dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
25 - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
26 Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
27 - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
28 for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
31 The cds::gc::ptb namespace and its members are internal representation of the Pass-the-Buck GC and should not be used directly.
32 Use cds::gc::PTB class in your code.
34 Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of PTB schema is
35 GC class and its nested classes. Before use any PTB-related class you must initialize PTB garbage collector
36 by constructing cds::gc::PTB object at the beginning of your main().
37 See cds::gc::PTB class for explanation.
39 \par Implementation issues
40 The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by spin-lock (i.e. serialized).
41 It seems that solution should not introduce significant performance bottleneck, because each thread has own set
42 of guards allocated from the global list of free guards, and the global list is accessed only when
43 all of the thread's guards are busy. In this case the thread allocates the next block of guards from the global list.
44 Guards allocated for the thread are pushed back to the global list only when the thread terminates.
48 // Forward declarations
// GuardArray and GarbageCollector are referenced before their definitions below.
50 template <size_t Count> class GuardArray;
52 class GarbageCollector;
54 /// Retired pointer type
// Alias for the {pointer, deleter} pair defined in cds/gc/details/retired_ptr.h.
55 typedef cds::gc::details::retired_ptr retired_ptr;
// Deleter function type imported from cds::gc::details (see retired_ptr.h).
57 using cds::gc::details::free_retired_ptr_func;
59 /// Details of Pass the Buck algorithm
62 // Forward declaration
65 /// Retired pointer buffer node
// A retired-pointer node can be linked into two distinct lists at different
// times: the retired-pointer buffer (via m_pNext) and a pool free list
// (via m_pNextFree).
66 struct retired_ptr_node {
67 retired_ptr m_ptr ; ///< retired pointer
68 retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
69 retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
72 /// Internal guard representation
74 typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
75 typedef void * guarded_ptr ; ///< type of value guarded
77 atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
80 typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field
81 handoff_spin spinHandOff ; ///< access to \p pHandOff field
82 handoff_ptr pHandOff ; ///< trapped pointer
85 atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
86 atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
88 guard_data * pThreadNext ; ///< next item of thread's local list of guards
// Constructor initializer list: all list links start null
// (the constructor header is elided in this extract).
96 , pGlobalNext( nullptr )
97 , pNextFree( nullptr )
98 , pThreadNext( nullptr )
// init(): clear the guarded pointer. Relaxed order is used here —
// presumably the guard is not yet visible to other threads at this point
// (NOTE(review): confirm against the publication site).
103 pPost.store( nullptr, atomics::memory_order_relaxed );
107 /// Checks if the guard is free, that is, it does not contain any pointer guarded
110 return pPost.load( atomics::memory_order_acquire ) == nullptr;
// Allocator of guard_data objects. Keeps two lists: an append-only list of
// every guard ever allocated (m_GuardList) and a spin-lock-protected free
// list (m_FreeGuardList).
115 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
116 class guard_allocator
118 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
120 atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
121 atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
122 SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
125 Unfortunately, access to the list of free guards is lock-based.
126 Lock-free manipulations with the guard free-list are ABA-prone.
127 TODO: working with m_FreeGuardList in lock-free manner.
131 /// Allocates new guard from the heap. The function uses aligned allocator
132 guard_data * allocNew()
134 //TODO: the allocator should make block allocation
136 details::guard_data * pGuard = m_GuardAllocator.New();
138 // Link guard to the list
139 // m_GuardList is accumulated list and it cannot support concurrent deletion,
140 // so, ABA problem is impossible for it
141 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
// CAS push onto the head of the append-only global guard list.
143 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
144 // pHead is changed by compare_exchange_weak
145 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Constructor: both list heads start empty.
154 : m_GuardList( nullptr )
155 , m_FreeGuardList( nullptr )
// Destructor: walk the global guard list and delete every guard ever allocated.
// Relaxed loads are sufficient — no other thread runs at destruction time.
162 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
163 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
164 m_GuardAllocator.Delete( pData );
168 /// Allocates a guard from free list or from heap if free list is empty
171 // Try to pop a guard from free-list
172 details::guard_data * pGuard;
// The free list is mutated only under m_freeListLock, so relaxed atomics
// are sufficient inside the critical section.
175 std::unique_lock<SpinLock> al( m_freeListLock );
176 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
178 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
187 /// Frees guard \p pGuard
189 The function places the guard \p pGuard into free-list
191 void free( guard_data * pGuard )
// Clear the guarded pointer before recycling, then push onto the free list
// under the spin lock.
193 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
195 std::unique_lock<SpinLock> al( m_freeListLock );
196 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
197 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
200 /// Allocates a list of guards
202 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
204 cds::gc::ptb::ThreadGC supporting method
206 guard_data * allocList( size_t nCount )
208 assert( nCount != 0 );
216 // The guard list allocated is private for the thread,
217 // so, we can use relaxed memory order
// Chain nCount guards through both pThreadNext and pNextFree
// (loop header elided in this extract); terminate the chain with null.
219 guard_data * p = alloc();
220 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
224 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
229 /// Frees list of guards
231 The list \p pList is linked by guard's \p pThreadNext field.
233 cds::gc::ptb::ThreadGC supporting method
235 void freeList( guard_data * pList )
237 assert( pList != nullptr );
// Walk the thread list: clear each guarded pointer and rebuild the
// pNextFree chain, then splice the whole list onto the global free list.
239 guard_data * pLast = pList;
240 while ( pLast->pThreadNext ) {
241 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
243 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
247 std::unique_lock<SpinLock> al( m_freeListLock );
248 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
249 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
252 /// Returns the list's head of guards allocated
255 return m_GuardList.load(atomics::memory_order_acquire);
259 /// Retired pointer buffer
261 The buffer of retired nodes ready for liberating.
262 When size of buffer exceeds a threshold the GC calls \p liberate procedure to free
265 class retired_ptr_buffer
267 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
268 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
// The buffer must be empty (already privatized/liberated) at destruction.
277 ~retired_ptr_buffer()
279 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
283 /// Pushes new node into the buffer. Returns current buffer size
284 size_t push( retired_ptr_node& node )
// Lock-free CAS push onto the head of the singly linked buffer.
286 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
288 node.m_pNext = pHead;
289 // pHead is changed by compare_exchange_weak
290 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
// The returned size is approximate: the counter is maintained with a
// relaxed fetch_add, independently of the head pointer.
292 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
295 /// Result of \ref ptb_gc_privatve "privatize" function.
297 The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
299 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
301 /// Gets current list of retired pointers and clears the list
302 /**@anchor ptb_gc_privatve
304 privatize_result privatize()
// Atomically detach the whole list; the caller becomes its sole owner.
306 privatize_result res;
307 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
309 // Item counter is needed only as a threshold for liberate function
310 // So, we may clear the item counter without synchronization with m_pHead
311 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
315 /// Returns current size of buffer (approximate)
318 return m_nItemCount.load(atomics::memory_order_relaxed);
322 /// Pool of retired pointers
324 The class acts as an allocator of retired nodes.
325 Retired pointers are linked in the lock-free list.
327 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
328 class retired_ptr_pool {
330 typedef retired_ptr_node item;
332 /// Count of items in block
// Sized so one block is roughly 1 KiB of items (minus one for the header).
333 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
337 block * pNext ; ///< next block
338 item items[m_nItemPerBlock] ; ///< item array
341 atomics::atomic<block *> m_pBlockListHead ; ///< head of allocated block list
343 // To solve ABA problem we use epoch-based approach
344 static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
345 atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
346 atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
347 atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
349 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
355 // allocate new block
356 block * pNew = m_BlockAllocator.New();
358 // link items within the block
// Thread all items of the new block onto an intra-block free chain.
359 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
360 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
361 pItem->m_pNextFree = pItem + 1;
362 CDS_STRICT_DO( pItem->m_pNext = nullptr );
365 // link new block to block list
367 block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
370 // pHead is changed by compare_exchange_weak
371 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
374 // link block's items to free list
376 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
378 pLastItem->m_pNextFree = pHead;
379 // pHead is changed by compare_exchange_weak
380 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Map the monotonically increasing epoch counter into [0, c_nEpochCount).
// c_nEpochCount is a power of two, so "& (c_nEpochCount - 1)" is modulo.
384 unsigned int current_epoch() const
386 return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
388 unsigned int next_epoch() const
390 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
// Constructor: no blocks yet; all per-epoch free lists start empty.
397 : m_pBlockListHead( nullptr )
399 , m_pGlobalFreeHead( nullptr )
401 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
402 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
// Destructor: delete every block ever allocated (single-threaded at this point).
410 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
412 m_BlockAllocator.Delete( pBlock );
416 /// Increments current epoch
419 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
424 /// Allocates new retired pointer
425 retired_ptr_node& alloc()
// First try the current epoch's free list (CAS pop)...
430 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
433 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
438 item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
440 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
445 // Epoch free list is empty
446 // Alloc from global free list
448 pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
454 // pItem is changed by compare_exchange_weak
455 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
458 CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
462 /// Allocates and initializes new retired pointer
463 retired_ptr_node& alloc( const retired_ptr& p )
465 retired_ptr_node& node = alloc();
470 /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
472 The list is linked on the m_pNextFree field
474 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
476 assert( pHead != nullptr );
477 assert( pTail != nullptr );
// CAS-splice the whole [pHead, pTail] range onto the *previous* epoch's
// free list; items become reusable only after the epoch advances past it.
482 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
483 pTail->m_pNextFree = pCurHead;
484 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
488 /// Uninitialized guard
491 friend class ThreadGC;
493 details::guard_data * m_pGuard ; ///< Pointer to guard data
495 /// Initialize empty guard.
497 : m_pGuard( nullptr )
500 /// The object is not copy-constructible
501 guard( guard const& ) = delete;
503 /// Object destructor, does nothing
507 /// Guards pointer \p p
// Release store publishes the guarded pointer to the liberate scan.
510 assert( m_pGuard != nullptr );
511 m_pGuard->pPost.store( p, atomics::memory_order_release );
512 //CDS_COMPILER_RW_BARRIER;
// clear(): drop the protection; relaxed store is used here.
518 assert( m_pGuard != nullptr );
519 m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
520 CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
523 /// Guards pointer \p p
524 template <typename T>
525 T * operator =( T * p )
// const_cast allows guarding pointers-to-const as well; the guard stores void*.
527 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
// Assigning nullptr clears the guard.
532 std::nullptr_t operator=(std::nullptr_t)
539 public: // for ThreadGC.
541 GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
542 the compiler produces error:
'cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard
' is protected
543 despite the fact that ThreadGC is declared as friend for guard class.
544 We would not like to declare the m_pGuard member as public.
545 Therefore, we have to add set_guard/get_guard public functions
548 void set_guard( details::guard_data * pGuard )
550 assert( m_pGuard == nullptr );
554 /// Get current guard data
555 details::guard_data * get_guard()
559 /// Get current guard data
560 details::guard_data * get_guard() const
566 } // namespace details
570 This class represents auto guard: ctor allocates a guard from guard pool,
571 dtor returns the guard back to the pool of free guards.
573 class Guard: public details::guard
576 typedef details::guard base_class;
577 friend class ThreadGC;
580 ThreadGC& m_gc ; ///< ThreadGC object of current thread
582 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
585 /// Returns guard allocated back to pool of free guards
586 ~Guard(); // inline after GarbageCollector
588 /// Returns PTB GC object
594 /// Guards pointer \p p
// Forwards to details::guard::operator=<T>, returning p for chaining.
595 template <typename T>
596 T * operator =( T * p )
598 return base_class::operator =<T>( p );
// Assigning nullptr clears the guard (forwards to the base class).
602 std::nullptr_t operator=(std::nullptr_t)
604 return base_class::operator =(nullptr);
611 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
612 dtor returns the guards allocated back to the pool.
614 template <size_t Count>
617 details::guard m_arr[Count] ; ///< array of guards
618 ThreadGC& m_gc ; ///< ThreadGC object of current thread
619 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
622 /// Rebind array for other size \p OtherCount
623 template <size_t OtherCount>
625 typedef GuardArray<OtherCount> other ; ///< rebinding result
629 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
630 GuardArray( ThreadGC& gc ) ; // inline below
632 /// The object is not default-constructible
633 GuardArray() = delete;
635 /// The object is not copy-constructible
636 GuardArray( GuardArray const& ) = delete;
638 /// Returns guards allocated back to pool
639 ~GuardArray() ; // inline below
641 /// Returns the capacity of array
642 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
647 /// Returns PTB ThreadGC object
648 ThreadGC& getGC() CDS_NOEXCEPT
653 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
654 details::guard& operator []( size_t nIndex )
656 assert( nIndex < capacity() );
657 return m_arr[nIndex];
660 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
661 const details::guard& operator []( size_t nIndex ) const
663 assert( nIndex < capacity() );
664 return m_arr[nIndex];
667 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
668 template <typename T>
669 void set( size_t nIndex, T * p )
671 assert( nIndex < capacity() );
672 m_arr[nIndex].set( p );
675 /// Clears (sets to \p nullptr) the guard \p nIndex
676 void clear( size_t nIndex )
678 assert( nIndex < capacity() );
679 m_arr[nIndex].clear();
682 /// Clears all guards in the array
685 for ( size_t i = 0; i < capacity(); ++i )
690 /// Memory manager (Garbage collector)
// Process-wide singleton; created by Construct(), destroyed by Destruct().
691 class CDS_EXPORT_API GarbageCollector
695 friend class ThreadGC;
697 /// Internal GC statistics
// Atomic counters updated concurrently by many threads.
700 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
701 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guards
705 , m_nFreeGuardCount(0)
711 /// Exception "No GarbageCollector object is created"
712 CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );
714 /// Internal GC statistics
// Plain-size_t snapshot of the atomic internal_stat, for user consumption.
717 size_t m_nGuardCount ; ///< Total guard count
718 size_t m_nFreeGuardCount ; ///< Count of free guards
723 , m_nFreeGuardCount(0)
// Copies the atomic counters into this snapshot (relaxed reads are
// sufficient for statistics).
726 InternalState& operator =( internal_stat const& s )
728 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
729 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
737 static GarbageCollector * m_pManager ; ///< GC global instance
739 details::guard_allocator<> m_GuardPool ; ///< Guard pool
740 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
741 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
742 //atomics::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate function calls
744 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
745 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
747 internal_stat m_stat ; ///< Internal statistics
748 bool m_bStatEnabled ; ///< Internal Statistics enabled
751 /// Initializes PTB memory manager singleton
753 This member function creates and initializes PTB global object.
754 The function should be called before using CDS data structure based on cds::gc::PTB GC. Usually,
755 this member function is called in the \p main() function. See cds::gc::ptb for example.
756 After calling of this function you may use CDS data structures based on cds::gc::PTB.
759 \li \p nLiberateThreshold - the liberate threshold. When count of retired pointers reaches this value,
760 the \ref ptb_gc_liberate "liberate" member function would be called for freeing retired pointers.
761 If \p nLiberateThreshold <= 1, \p liberate would be called after each \ref ptb_gc_retirePtr "retirePtr" call.
762 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
763 is initialized the GC allocates local guard pool for the thread from common guard pool.
764 By perforce the local thread's guard pool is grown automatically from common pool.
765 When the thread terminates, its guard pool is returned to the common GC's pool.
768 static void CDS_STDCALL Construct(
769 size_t nLiberateThreshold = 1024
770 , size_t nInitialThreadGuardCount = 8
773 /// Destroys PTB memory manager
775 The member function destroys PTB global object. After calling of this function you may \b NOT
776 use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
777 at the end of your \p main(). See cds::gc::ptb for example.
779 static void CDS_STDCALL Destruct();
781 /// Returns pointer to GarbageCollector instance
783 If PTB GC is not initialized, \p PTBManagerEmpty exception is thrown
785 static GarbageCollector& instance()
787 if ( m_pManager == nullptr )
788 throw PTBManagerEmpty();
792 /// Checks if global GC object is constructed and may be used
793 static bool isUsed() CDS_NOEXCEPT
795 return m_pManager != nullptr;
800 /// Internal interface
802 /// Allocates a guard
803 details::guard_data * allocGuard()
805 return m_GuardPool.alloc();
808 /// Frees guard \p g for reusing in future
809 void freeGuard(details::guard_data * pGuard )
811 m_GuardPool.free( pGuard );
814 /// Allocates guard list for a thread.
815 details::guard_data * allocGuardList( size_t nCount )
817 return m_GuardPool.allocList( nCount );
820 /// Frees thread's guard list pointed by \p pList
821 void freeGuardList( details::guard_data * pList )
823 m_GuardPool.freeList( pList );
826 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointers for deferred reclamation
827 /**@anchor ptb_gc_retirePtr
// Type-erases the pointer and deleter into a retired_ptr and forwards to
// the non-template overload below.
829 template <typename T>
830 void retirePtr( T * p, void (* pFunc)(T *) )
832 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
835 /// Places retired pointer \p into thread's array of retired pointers for deferred reclamation
// Triggers liberate() once the buffer reaches the configured threshold.
836 void retirePtr( retired_ptr const& p )
838 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
843 /// Liberate function
844 /** @anchor ptb_gc_liberate
845 The main function of Pass The Buck algorithm. It tries to free retired pointers if they are not
846 trapped by any guard.
855 void liberate( details::liberate_set& set );
860 /// Get internal statistics
861 InternalState& getInternalState(InternalState& stat) const
863 return stat = m_stat;
866 /// Checks if internal statistics enabled
867 bool isStatisticsEnabled() const
869 return m_bStatEnabled;
872 /// Enables/disables internal statistics
// Returns the previous value of the flag.
873 bool enableStatistics( bool bEnable )
875 bool bEnabled = m_bStatEnabled;
876 m_bStatEnabled = bEnable;
// Private constructor: singleton is built only through Construct().
882 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
889 To use Pass The Buck reclamation schema each thread object must be linked with the object of ThreadGC class
890 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
891 on the start of each thread that uses PTB GC. Before terminating the thread linked to PTB GC it is necessary to call
892 \ref cds_threading "cds::threading::Manager::detachThread()".
894 The ThreadGC object maintains two lists:
895 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
896 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
897 Free guard list is a subset of thread guard list.
901 GarbageCollector& m_gc ; ///< reference to GC singleton
902 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
903 details::guard_data * m_pFree ; ///< The list of free guards from m_pList
906 /// Default constructor
908 : m_gc( GarbageCollector::instance() )
913 /// The object is not copy-constructible
914 ThreadGC( ThreadGC const& ) = delete;
916 /// Dtor calls fini()
922 /// Initialization. Repeat call is available
927 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
931 /// Finalization. Repeat call is available
935 m_gc.freeGuardList( m_pList );
942 /// Initializes guard \p g
943 void allocGuard( Guard& g )
945 assert( m_pList != nullptr );
// Fast path: pop a guard from the thread-local free list (relaxed is
// sufficient — the list is accessed by this thread only).
947 g.m_pGuard = m_pFree;
948 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
// Slow path: free list empty — get a guard from the global pool and link
// it into the thread list.
951 g.m_pGuard = m_gc.allocGuard();
952 g.m_pGuard->pThreadNext = m_pList;
953 m_pList = g.m_pGuard;
// Returns the guard to the thread-local free list (not to the global pool).
958 void freeGuard( Guard& g )
960 assert( m_pList != nullptr );
961 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
962 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
963 m_pFree = g.m_pGuard;
966 /// Initializes guard array \p arr
967 template <size_t Count>
968 void allocGuard( GuardArray<Count>& arr )
970 assert( m_pList != nullptr );
// First satisfy as many slots as possible from the thread-local free list...
973 while ( m_pFree && nCount < Count ) {
974 arr[nCount].set_guard( m_pFree );
975 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
// ...then fill the remainder from the global pool.
979 while ( nCount < Count ) {
980 details::guard& g = arr[nCount++];
981 g.set_guard( m_gc.allocGuard() );
982 g.get_guard()->pThreadNext = m_pList;
983 m_pList = g.get_guard();
987 /// Frees guard array \p arr
988 template <size_t Count>
989 void freeGuard( GuardArray<Count>& arr )
991 assert( m_pList != nullptr );
// Chain all Count guards together via pNextFree, clearing each guarded
// pointer, then prepend the whole chain to the thread-local free list.
993 details::guard_data * pGuard;
994 for ( size_t i = 0; i < Count - 1; ++i ) {
995 pGuard = arr[i].get_guard();
996 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
997 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
999 pGuard = arr[Count-1].get_guard();
1000 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
1001 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
1002 m_pFree = arr[0].get_guard();
1005 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointers for deferred reclamation
1006 template <typename T>
1007 void retirePtr( T * p, void (* pFunc)(T *) )
1009 m_gc.retirePtr( p, pFunc );
1021 //////////////////////////////////////////////////////////
// Acquires a guard from the current thread's ThreadGC on construction.
1024 inline Guard::Guard(ThreadGC& gc)
1027 getGC().allocGuard( *this );
// Returns the guard to the thread-local free list on destruction.
1029 inline Guard::~Guard()
1031 getGC().freeGuard( *this );
// Acquires Count guards from the current thread's ThreadGC on construction.
1034 template <size_t Count>
1035 inline GuardArray<Count>::GuardArray( ThreadGC& gc )
1038 getGC().allocGuard( *this );
// Returns all Count guards to the thread-local free list on destruction.
1040 template <size_t Count>
1041 inline GuardArray<Count>::~GuardArray()
1043 getGC().freeGuard( *this );
1047 }} // namespace cds::gc
1049 #if CDS_COMPILER == CDS_COMPILER_MSVC
1050 # pragma warning(pop)
1054 #endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H