3 #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
4 #define __CDS_GC_PTB_PASS_THE_BUCK_H
6 #include <cds/cxx11_atomic.h>
7 #include <cds/gc/details/retired_ptr.h>
8 #include <cds/details/aligned_allocator.h>
9 #include <cds/details/allocator.h>
10 #include <cds/details/noncopyable.h>
12 #include <cds/lock/spinlock.h>
14 #if CDS_COMPILER == CDS_COMPILER_MSVC
15 # pragma warning(push)
16 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
19 namespace cds { namespace gc {
21 /// Pass The Buck reclamation schema
24 - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
25 dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
26 - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
27 Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
28 - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
29 for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
32 The cds::gc::ptb namespace and its members are internal representation of the Pass-the-Buck GC and should not be used directly.
33 Use cds::gc::PTB class in your code.
35 Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of PTB schema is
36 GC class and its nested classes. Before using any PTB-related class you must initialize the PTB garbage collector
37 by constructing a cds::gc::PTB object at the beginning of your main().
38 See cds::gc::PTB class for explanation.
40 \par Implementation issues
41 The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by spin-lock (i.e. serialized).
42 It seems that solution should not introduce significant performance bottleneck, because each thread has own set
43 of guards allocated from global list of free guards and access to global list is occurred only when
44 all thread's guard is busy. In this case the thread allocates next block of guards from global list.
45 Guards allocated for a thread are pushed back to the global list only when the thread terminates.
49 // Forward declarations
51 template <size_t Count> class GuardArray;
53 class GarbageCollector;
55 /// Retired pointer type
56 typedef cds::gc::details::retired_ptr retired_ptr;
58 using cds::gc::details::free_retired_ptr_func;
60 /// Details of Pass the Buck algorithm
63 // Forward declaration
66 /// Retired pointer buffer node
67 struct retired_ptr_node {
68 retired_ptr m_ptr ; ///< retired pointer
69 retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
70 retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
73 /// Internal guard representation
75 typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
76 typedef void * guarded_ptr ; ///< type of value guarded
78 CDS_ATOMIC::atomic<guarded_ptr> pPost ; ///< pointer guarded
81 typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field
82 handoff_spin spinHandOff ; ///< access to \p pHandOff field
83 handoff_ptr pHandOff ; ///< trapped pointer
86 CDS_ATOMIC::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
87 CDS_ATOMIC::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
89 guard_data * pThreadNext ; ///< next item of thread's local list of guards
97 , pGlobalNext( nullptr )
98 , pNextFree( nullptr )
99 , pThreadNext( nullptr )
104 pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
108 /// Checks if the guard is free, that is, it does not contain any pointer guarded
111 return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr;
116 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
117 class guard_allocator
119 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
121 CDS_ATOMIC::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
122 CDS_ATOMIC::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
123 SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
126 Unfortunately, access to the list of free guard is lock-based.
127 Lock-free manipulations with guard free-list are ABA-prone.
128 TODO: working with m_FreeGuardList in lock-free manner.
132 /// Allocates new guard from the heap. The function uses aligned allocator
133 guard_data * allocNew()
135 //TODO: the allocator should make block allocation
137 details::guard_data * pGuard = m_GuardAllocator.New();
139 // Link guard to the list
140 // m_GuardList is accumulated list and it cannot support concurrent deletion,
141 // so, ABA problem is impossible for it
142 details::guard_data * pHead = m_GuardList.load( CDS_ATOMIC::memory_order_acquire );
144 pGuard->pGlobalNext.store( pHead, CDS_ATOMIC::memory_order_relaxed );
145 // pHead is changed by compare_exchange_weak
146 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
155 : m_GuardList( nullptr )
156 , m_FreeGuardList( nullptr )
163 for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
164 pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed );
165 m_GuardAllocator.Delete( pData );
169 /// Allocates a guard from free list or from heap if free list is empty
172 // Try to pop a guard from free-list
173 details::guard_data * pGuard;
176 cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
177 pGuard = m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed);
179 m_FreeGuardList.store( pGuard->pNextFree.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
188 /// Frees guard \p pGuard
190 The function places the guard \p pGuard into free-list
192 void free( guard_data * pGuard )
194 pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
196 cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
197 pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
198 m_FreeGuardList.store( pGuard, CDS_ATOMIC::memory_order_relaxed );
201 /// Allocates list of guard
203 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
205 cds::gc::ptb::ThreadGC supporting method
207 guard_data * allocList( size_t nCount )
209 assert( nCount != 0 );
217 // The guard list allocated is private for the thread,
218 // so, we can use relaxed memory order
220 guard_data * p = alloc();
221 pLast->pNextFree.store( pLast->pThreadNext = p, CDS_ATOMIC::memory_order_relaxed );
225 pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed );
230 /// Frees list of guards
232 The list \p pList is linked by guard's \p pThreadNext field.
234 cds::gc::ptb::ThreadGC supporting method
236 void freeList( guard_data * pList )
238 assert( pList != nullptr );
240 guard_data * pLast = pList;
241 while ( pLast->pThreadNext ) {
242 pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
244 pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed );
248 cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
249 pLast->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
250 m_FreeGuardList.store( pList, CDS_ATOMIC::memory_order_relaxed );
253 /// Returns the list's head of guards allocated
256 return m_GuardList.load(CDS_ATOMIC::memory_order_acquire);
260 /// Retired pointer buffer
262 The buffer of retired nodes ready for liberating.
263 When size of buffer exceeds a threshold the GC calls \p liberate procedure to free
266 class retired_ptr_buffer
268 CDS_ATOMIC::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
269 CDS_ATOMIC::atomic<size_t> m_nItemCount; ///< buffer's item count
278 ~retired_ptr_buffer()
280 assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
284 /// Pushes new node into the buffer. Returns current buffer size
285 size_t push( retired_ptr_node& node )
287 retired_ptr_node * pHead = m_pHead.load(CDS_ATOMIC::memory_order_acquire);
289 node.m_pNext = pHead;
290 // pHead is changed by compare_exchange_weak
291 } while ( !m_pHead.compare_exchange_weak( pHead, &node, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
293 return m_nItemCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1;
296 /// Result of \ref ptb_gc_privatve "privatize" function.
298 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
300 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
302 /// Gets current list of retired pointer and clears the list
303 /**@anchor ptb_gc_privatve
305 privatize_result privatize()
307 privatize_result res;
308 res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel );
310 // Item counter is needed only as a threshold for liberate function
311 // So, we may clear the item counter without synchronization with m_pHead
312 res.second = m_nItemCount.exchange( 0, CDS_ATOMIC::memory_order_relaxed );
316 /// Returns current size of buffer (approximate)
319 return m_nItemCount.load(CDS_ATOMIC::memory_order_relaxed);
323 /// Pool of retired pointers
325 The class acts as an allocator of retired node.
326 Retired pointers are linked in the lock-free list.
328 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
329 class retired_ptr_pool {
331 typedef retired_ptr_node item;
333 /// Count of items in block
334 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
338 block * pNext ; ///< next block
339 item items[m_nItemPerBlock] ; ///< item array
342 CDS_ATOMIC::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
344 // To solve ABA problem we use epoch-based approach
345 static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
346 CDS_ATOMIC::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
347 CDS_ATOMIC::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
348 CDS_ATOMIC::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
350 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
356 // allocate new block
357 block * pNew = m_BlockAllocator.New();
359 // link items within the block
360 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
361 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
362 pItem->m_pNextFree = pItem + 1;
363 CDS_STRICT_DO( pItem->m_pNext = nullptr );
366 // link new block to block list
368 block * pHead = m_pBlockListHead.load(CDS_ATOMIC::memory_order_acquire);
371 // pHead is changed by compare_exchange_weak
372 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
375 // link block's items to free list
377 item * pHead = m_pGlobalFreeHead.load(CDS_ATOMIC::memory_order_acquire);
379 pLastItem->m_pNextFree = pHead;
380 // pHead is changed by compare_exchange_weak
381 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
385 unsigned int current_epoch() const
387 return m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) & (c_nEpochCount - 1);
389 unsigned int next_epoch() const
391 return (m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) - 1) & (c_nEpochCount - 1);
398 : m_pBlockListHead( nullptr )
400 , m_pGlobalFreeHead( nullptr )
402 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
403 m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
411 for ( block * pBlock = m_pBlockListHead.load(CDS_ATOMIC::memory_order_relaxed); pBlock; pBlock = p ) {
413 m_BlockAllocator.Delete( pBlock );
417 /// Increments current epoch
420 m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_acq_rel );
425 /// Allocates new retired pointer
426 retired_ptr_node& alloc()
431 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
434 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
439 item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
441 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
446 // Epoch free list is empty
447 // Alloc from global free list
449 pItem = m_pGlobalFreeHead.load( CDS_ATOMIC::memory_order_acquire );
455 // pItem is changed by compare_exchange_weak
456 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
459 CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
463 /// Allocates and initializes new retired pointer
464 retired_ptr_node& alloc( const retired_ptr& p )
466 retired_ptr_node& node = alloc();
471 /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
473 The list is linked on the m_pNextFree field
475 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
477 assert( pHead != nullptr );
478 assert( pTail != nullptr );
483 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(CDS_ATOMIC::memory_order_acquire);
484 pTail->m_pNextFree = pCurHead;
485 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
489 /// Uninitialized guard
490 class guard: public cds::details::noncopyable
492 friend class ThreadGC;
494 details::guard_data * m_pGuard ; ///< Pointer to guard data
496 /// Initialize empty guard.
498 : m_pGuard( nullptr )
501 /// Object destructor, does nothing
505 /// Guards pointer \p p
508 assert( m_pGuard != nullptr );
509 m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release );
510 //CDS_COMPILER_RW_BARRIER;
516 assert( m_pGuard != nullptr );
517 m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
518 CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
521 /// Guards pointer \p p
522 template <typename T>
523 T * operator =( T * p )
525 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
529 public: // for ThreadGC.
531 GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
532 the compiler produces error:
\91cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard
\92 is protected
533 despite the fact that ThreadGC is declared as friend for guard class.
534 We should not like to declare m_pGuard member as public one.
535 Therefore, we have to add set_guard/get_guard public functions
538 void set_guard( details::guard_data * pGuard )
540 assert( m_pGuard == nullptr );
544 /// Get current guard data
545 details::guard_data * get_guard()
549 /// Get current guard data
550 details::guard_data * get_guard() const
556 } // namespace details
560 This class represents auto guard: ctor allocates a guard from guard pool,
561 dtor returns the guard back to the pool of free guard.
563 class Guard: public details::guard
566 typedef details::guard base_class;
567 friend class ThreadGC;
570 ThreadGC& m_gc ; ///< ThreadGC object of current thread
572 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
575 /// Returns guard allocated back to pool of free guards
576 ~Guard(); // inline after GarbageCollector
578 /// Returns PTB GC object
584 /// Guards pointer \p p
585 template <typename T>
586 T * operator =( T * p )
588 return base_class::operator =<T>( p );
594 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
595 dtor returns the guards allocated back to the pool.
597 template <size_t Count>
598 class GuardArray: public cds::details::noncopyable
600 details::guard m_arr[Count] ; ///< array of guard
601 ThreadGC& m_gc ; ///< ThreadGC object of current thread
602 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
605 /// Rebind array for other size \p OtherCount
606 template <size_t OtherCount>
608 typedef GuardArray<OtherCount> other ; ///< rebinding result
612 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
613 GuardArray( ThreadGC& gc ) ; // inline below
615 /// Returns guards allocated back to pool
616 ~GuardArray() ; // inline below
618 /// Returns the capacity of array
619 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
624 /// Returns PTB ThreadGC object
625 ThreadGC& getGC() CDS_NOEXCEPT
630 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
631 details::guard& operator []( size_t nIndex )
633 assert( nIndex < capacity() );
634 return m_arr[nIndex];
637 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
638 const details::guard& operator []( size_t nIndex ) const
640 assert( nIndex < capacity() );
641 return m_arr[nIndex];
644 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
645 template <typename T>
646 void set( size_t nIndex, T * p )
648 assert( nIndex < capacity() );
649 m_arr[nIndex].set( p );
652 /// Clears (sets to NULL) the guard \p nIndex
653 void clear( size_t nIndex )
655 assert( nIndex < capacity() );
656 m_arr[nIndex].clear();
659 /// Clears all guards in the array
662 for ( size_t i = 0; i < capacity(); ++i )
667 /// Memory manager (Garbage collector)
668 class CDS_EXPORT_API GarbageCollector
672 friend class ThreadGC;
674 /// Internal GC statistics
677 CDS_ATOMIC::atomic<size_t> m_nGuardCount ; ///< Total guard count
678 CDS_ATOMIC::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
682 , m_nFreeGuardCount(0)
688 /// Exception "No GarbageCollector object is created"
689 CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );
691 /// Internal GC statistics
694 size_t m_nGuardCount ; ///< Total guard count
695 size_t m_nFreeGuardCount ; ///< Count of free guard
700 , m_nFreeGuardCount(0)
703 InternalState& operator =( internal_stat const& s )
705 m_nGuardCount = s.m_nGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
706 m_nFreeGuardCount = s.m_nFreeGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
714 static GarbageCollector * m_pManager ; ///< GC global instance
716 details::guard_allocator<> m_GuardPool ; ///< Guard pool
717 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
718 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
719 //CDS_ATOMIC::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate fnction call
721 CDS_ATOMIC::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
722 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
724 internal_stat m_stat ; ///< Internal statistics
725 bool m_bStatEnabled ; ///< Internal Statistics enabled
728 /// Initializes PTB memory manager singleton
730 This member function creates and initializes PTB global object.
731 The function should be called before using CDS data structure based on cds::gc::PTB GC. Usually,
732 this member function is called in the \p main() function. See cds::gc::ptb for example.
733 After calling of this function you may use CDS data structures based on cds::gc::PTB.
736 \li \p nLiberateThreshold - the liberate threshold. When count of retired pointers reaches this value,
737 the \ref ptb_gc_liberate "liberate" member function would be called for freeing retired pointers.
738 If \p nLiberateThreshold <= 1, \p liberate would called after each \ref ptb_gc_retirePtr "retirePtr" call.
739 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
740 is initialized the GC allocates local guard pool for the thread from common guard pool.
741 By perforce the local thread's guard pool is grown automatically from common pool.
742 When the thread terminated its guard pool is backed to common GC's pool.
745 static void CDS_STDCALL Construct(
746 size_t nLiberateThreshold = 1024
747 , size_t nInitialThreadGuardCount = 8
750 /// Destroys PTB memory manager
752 The member function destroys PTB global object. After calling of this function you may \b NOT
753 use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
754 at the end of your \p main(). See cds::gc::ptb for example.
756 static void CDS_STDCALL Destruct();
758 /// Returns pointer to GarbageCollector instance
760 If PTB GC is not initialized, \p PTBManagerEmpty exception is thrown
762 static GarbageCollector& instance()
764 if ( m_pManager == nullptr )
765 throw PTBManagerEmpty();
769 /// Checks if global GC object is constructed and may be used
770 static bool isUsed() CDS_NOEXCEPT
772 return m_pManager != nullptr;
777 /// Internal interface
779 /// Allocates a guard
780 details::guard_data * allocGuard()
782 return m_GuardPool.alloc();
785 /// Frees guard \p g for reusing in future
786 void freeGuard(details::guard_data * pGuard )
788 m_GuardPool.free( pGuard );
791 /// Allocates guard list for a thread.
792 details::guard_data * allocGuardList( size_t nCount )
794 return m_GuardPool.allocList( nCount );
797 /// Frees thread's guard list pointed by \p pList
798 void freeGuardList( details::guard_data * pList )
800 m_GuardPool.freeList( pList );
803 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
804 /**@anchor ptb_gc_retirePtr
806 template <typename T>
807 void retirePtr( T * p, void (* pFunc)(T *) )
809 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
812 /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
813 void retirePtr( retired_ptr const& p )
815 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed) )
820 /// Liberate function
821 /** @anchor ptb_gc_liberate
822 The main function of Pass The Buck algorithm. It tries to free retired pointers if they are not
823 trapped by any guard.
832 void liberate( details::liberate_set& set );
837 /// Get internal statistics
838 InternalState& getInternalState(InternalState& stat) const
840 return stat = m_stat;
843 /// Checks if internal statistics enabled
844 bool isStatisticsEnabled() const
846 return m_bStatEnabled;
849 /// Enables/disables internal statistics
850 bool enableStatistics( bool bEnable )
852 bool bEnabled = m_bStatEnabled;
853 m_bStatEnabled = bEnable;
859 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
866 To use Pass The Buck reclamation schema each thread object must be linked with the object of ThreadGC class
867 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
868 on the start of each thread that uses PTB GC. Before terminating the thread linked to PTB GC it is necessary to call
869 \ref cds_threading "cds::threading::Manager::detachThread()".
871 The ThreadGC object maintains two list:
872 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
873 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
874 Free guard list is a subset of thread guard list.
876 class ThreadGC: public cds::details::noncopyable
878 GarbageCollector& m_gc ; ///< reference to GC singleton
879 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
880 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
884 : m_gc( GarbageCollector::instance() )
889 /// Dtor calls fini()
895 /// Initialization. Repeat call is available
900 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
904 /// Finalization. Repeat call is available
908 m_gc.freeGuardList( m_pList );
915 /// Initializes guard \p g
916 void allocGuard( Guard& g )
918 assert( m_pList != nullptr );
920 g.m_pGuard = m_pFree;
921 m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
924 g.m_pGuard = m_gc.allocGuard();
925 g.m_pGuard->pThreadNext = m_pList;
926 m_pList = g.m_pGuard;
931 void freeGuard( Guard& g )
933 assert( m_pList != nullptr );
934 g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
935 g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
936 m_pFree = g.m_pGuard;
939 /// Initializes guard array \p arr
940 template <size_t Count>
941 void allocGuard( GuardArray<Count>& arr )
943 assert( m_pList != nullptr );
946 while ( m_pFree && nCount < Count ) {
947 arr[nCount].set_guard( m_pFree );
948 m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
952 while ( nCount < Count ) {
953 details::guard& g = arr[nCount++];
954 g.set_guard( m_gc.allocGuard() );
955 g.get_guard()->pThreadNext = m_pList;
956 m_pList = g.get_guard();
960 /// Frees guard array \p arr
961 template <size_t Count>
962 void freeGuard( GuardArray<Count>& arr )
964 assert( m_pList != nullptr );
966 details::guard_data * pGuard;
967 for ( size_t i = 0; i < Count - 1; ++i ) {
968 pGuard = arr[i].get_guard();
969 pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
970 pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed );
972 pGuard = arr[Count-1].get_guard();
973 pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
974 pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
975 m_pFree = arr[0].get_guard();
978 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
979 template <typename T>
980 void retirePtr( T * p, void (* pFunc)(T *) )
982 m_gc.retirePtr( p, pFunc );
994 //////////////////////////////////////////////////////////
997 inline Guard::Guard(ThreadGC& gc)
1000 getGC().allocGuard( *this );
1002 inline Guard::~Guard()
1004 getGC().freeGuard( *this );
1007 template <size_t Count>
1008 inline GuardArray<Count>::GuardArray( ThreadGC& gc )
1011 getGC().allocGuard( *this );
1013 template <size_t Count>
1014 inline GuardArray<Count>::~GuardArray()
1016 getGC().freeGuard( *this );
1020 }} // namespace cds::gc
1022 #if CDS_COMPILER == CDS_COMPILER_MSVC
1023 # pragma warning(pop)
1027 #endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H