3 #ifndef __CDS_GC_DETAILS_DHP_H
4 #define __CDS_GC_DETAILS_DHP_H
6 #include <mutex> // unique_lock
7 #include <cds/cxx11_atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/lock/spinlock.h>
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 # pragma warning(push)
15 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
18 namespace cds { namespace gc {
20 /// Dynamic Hazard Pointer reclamation schema
22 The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
23 Use cds::gc::DHP class in your code.
25 Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of DHP schema is
26 GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
27 by constructing a cds::gc::DHP object at the beginning of your main().
28 See cds::gc::DHP class for explanation.
30 \par Implementation issues
31 The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
32 This solution should not introduce a significant performance bottleneck, because each thread has its own set
33 of guards allocated from the global list of free guards, and the global list is accessed only when
34 all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
35 Guards allocated for a thread are pushed back to the global list only when the thread terminates.
39 // Forward declarations
41 template <size_t Count> class GuardArray;
43 class GarbageCollector;
45 /// Retired pointer type
46 typedef cds::gc::details::retired_ptr retired_ptr;
48 using cds::gc::details::free_retired_ptr_func;
50 /// Details of Dynamic Hazard Pointer algorithm
53 // Forward declaration
56 /// Retired pointer buffer node
// A retired pointer plus two intrusive links: m_pNext threads the node into the
// retired_ptr_buffer stack, m_pNextFree threads it into a pool free-list.
57 struct retired_ptr_node {
58 retired_ptr m_ptr ; ///< retired pointer
59 retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
60 retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
63 /// Internal guard representation
// Node of the global/thread-local guard lists. pPost holds the pointer currently
// protected by this guard; nullptr means the guard is free.
65 typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
66 typedef void * guarded_ptr ; ///< type of value guarded
68 atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
70 atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
71 atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
73 guard_data * pThreadNext ; ///< next item of thread's local list of guards
// Default ctor: all links start empty. Relaxed/default initialization is fine
// here since the object is not yet visible to other threads.
76 guard_data() CDS_NOEXCEPT
78 , pGlobalNext( nullptr )
79 , pNextFree( nullptr )
80 , pThreadNext( nullptr )
// Re-initializes the guard before reuse: clears the protected pointer.
83 void init() CDS_NOEXCEPT
85 pPost.store( nullptr, atomics::memory_order_relaxed );
89 /// Checks if the guard is free, that is, it does not contain any pointer guarded
90 bool isFree() const CDS_NOEXCEPT
92 return pPost.load( atomics::memory_order_acquire ) == nullptr;
97 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
// Global pool of guard_data nodes. m_GuardList accumulates every guard ever
// allocated (it only grows while the allocator lives); m_FreeGuardList holds
// currently unused guards and is serialized by m_freeListLock.
100 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
102 atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
103 atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
104 SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
107 Unfortunately, access to the list of free guards is lock-based.
108 Lock-free manipulations with the guard free-list are ABA-prone.
109 TODO: work with m_FreeGuardList in a lock-free manner.
113 /// Allocates new guard from the heap. The function uses aligned allocator
114 guard_data * allocNew()
116 //TODO: the allocator should make block allocation
118 details::guard_data * pGuard = m_GuardAllocator.New();
120 // Link guard to the list
121 // m_GuardList is an accumulate-only list and it cannot support concurrent deletion,
122 // so the ABA problem is impossible for it
123 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
125 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
126 // pHead is changed by compare_exchange_weak
127 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Ctor: both lists start empty.
135 guard_allocator() CDS_NOEXCEPT
136 : m_GuardList( nullptr )
137 , m_FreeGuardList( nullptr )
// Dtor walks the accumulated list and returns every guard to the heap.
144 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
145 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
146 m_GuardAllocator.Delete( pData );
150 /// Allocates a guard from free list or from heap if free list is empty
153 // Try to pop a guard from free-list
154 details::guard_data * pGuard;
// The pop is serialized by the spin-lock, so relaxed atomics suffice under it.
157 std::unique_lock<SpinLock> al( m_freeListLock );
158 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
160 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
169 /// Frees guard \p pGuard
171 The function places the guard \p pGuard into free-list
173 void free( guard_data * pGuard ) CDS_NOEXCEPT
// Clear the guarded pointer first, then push onto the free list under the lock.
175 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
177 std::unique_lock<SpinLock> al( m_freeListLock );
178 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
179 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
182 /// Allocates list of guard
184 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
186 cds::gc::dhp::ThreadGC supporting method
188 guard_data * allocList( size_t nCount )
190 assert( nCount != 0 );
198 // The guard list allocated is private for the thread,
199 // so, we can use relaxed memory order
201 guard_data * p = alloc();
202 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
// Terminate the chain: the last node's links are null.
206 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
211 /// Frees list of guards
213 The list \p pList is linked by guard's \p pThreadNext field.
215 cds::gc::dhp::ThreadGC supporting method
217 void freeList( guard_data * pList ) CDS_NOEXCEPT
219 assert( pList != nullptr );
// Walk the thread list clearing each guard and relinking pNextFree to mirror
// pThreadNext, then splice the whole chain onto the global free list at once.
221 guard_data * pLast = pList;
222 while ( pLast->pThreadNext ) {
223 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
225 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
229 std::unique_lock<SpinLock> al( m_freeListLock );
230 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
231 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
234 /// Returns the list's head of guards allocated
235 guard_data * begin() CDS_NOEXCEPT
237 return m_GuardList.load(atomics::memory_order_acquire);
241 /// Retired pointer buffer
243 The buffer of retired nodes ready for liberating.
244 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
247 class retired_ptr_buffer
// Lock-free push-only stack of retired_ptr_node (CAS on the head) plus an
// approximate item counter used only as the scan() threshold heuristic.
249 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
250 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
254 CDS_CONSTEXPR retired_ptr_buffer() CDS_NOEXCEPT
// Dtor: the buffer must have been drained (privatize()d) before destruction.
259 ~retired_ptr_buffer() CDS_NOEXCEPT
261 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
265 /// Pushes new node into the buffer. Returns current buffer size
266 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
268 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
270 node.m_pNext = pHead;
271 // pHead is changed by compare_exchange_weak
272 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
// The counter is only a heuristic; a relaxed increment is sufficient.
274 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
277 /// Result of \ref dhp_gc_privatve "privatize" function.
279 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
281 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
283 /// Gets current list of retired pointer and clears the list
284 /**@anchor dhp_gc_privatve
286 privatize_result privatize() CDS_NOEXCEPT
288 privatize_result res;
// Atomically detach the whole list so the caller owns it exclusively.
289 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
291 // Item counter is needed only as a threshold for \p scan() function
292 // So, we may clear the item counter without synchronization with m_pHead
293 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
297 /// Returns current size of buffer (approximate)
298 size_t size() const CDS_NOEXCEPT
300 return m_nItemCount.load(atomics::memory_order_relaxed);
304 /// Pool of retired pointers
306 The class acts as an allocator of retired node.
307 Retired pointers are linked in the lock-free list.
309 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
310 class retired_ptr_pool {
312 typedef retired_ptr_node item;
314 /// Count of items in block
315 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
// Items are carved out of fixed-size blocks; blocks are chained so the dtor
// can release all of them at once.
319 block * pNext ; ///< next block
320 item items[m_nItemPerBlock] ; ///< item array
323 atomics::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
325 // To solve ABA problem we use epoch-based approach
326 static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
327 atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
328 atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
329 atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
331 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
// Allocates one block, links its items into an intra-block free chain, and
// publishes both the block and its items to the global lists with CAS loops.
337 // allocate new block
338 block * pNew = m_BlockAllocator.New();
340 // link items within the block
341 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
342 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
343 pItem->m_pNextFree = pItem + 1;
344 CDS_STRICT_DO( pItem->m_pNext = nullptr );
347 // link new block to block list
349 block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
352 // pHead is changed by compare_exchange_weak
353 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
356 // link block's items to free list
358 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
360 pLastItem->m_pNextFree = pHead;
361 // pHead is changed by compare_exchange_weak
362 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Index of the current epoch. NOTE: the mask is only valid while
// c_nEpochCount stays a power of two.
366 unsigned int current_epoch() const CDS_NOEXCEPT
368 return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
// Epoch index one behind the current one; free_range() recycles into it.
371 unsigned int next_epoch() const CDS_NOEXCEPT
373 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
380 : m_pBlockListHead( nullptr )
382 , m_pGlobalFreeHead( nullptr )
384 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
385 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
// Dtor: free every allocated block; items must no longer be referenced.
393 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
395 m_BlockAllocator.Delete( pBlock );
399 /// Increments current epoch
400 void inc_epoch() CDS_NOEXCEPT
402 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
407 /// Allocates new retired pointer
408 retired_ptr_node& alloc()
// Fast path: pop from the current epoch's free list; on failure fall back to
// the global free list (refilled by block allocation).
413 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
416 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
420 // Epoch free list is empty
421 // Alloc from global free list
423 pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
429 // pItem is changed by compare_exchange_weak
430 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
433 CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
437 /// Allocates and initializes new retired pointer
438 retired_ptr_node& alloc( const retired_ptr& p )
440 retired_ptr_node& node = alloc();
445 /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
447 The list is linked on the m_pNextFree field
449 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
451 assert( pHead != nullptr );
452 assert( pTail != nullptr );
// Push the whole [pHead, pTail] chain onto the previous epoch's free list.
457 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
458 pTail->m_pNextFree = pCurHead;
459 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
463 /// Uninitialized guard
// Thin wrapper over a guard_data pointer. Allocation/deallocation of the
// underlying guard_data is performed by ThreadGC, not by this class.
466 friend class ThreadGC;
468 details::guard_data * m_pGuard ; ///< Pointer to guard data
470 /// Initialize empty guard.
471 CDS_CONSTEXPR guard() CDS_NOEXCEPT
472 : m_pGuard( nullptr )
475 /// The object is not copy-constructible
476 guard( guard const& ) = delete;
478 /// Object destructor, does nothing
479 ~guard() CDS_NOEXCEPT
482 /// Guards pointer \p p
483 void set( void * p ) CDS_NOEXCEPT
485 assert( m_pGuard != nullptr );
486 m_pGuard->pPost.store( p, atomics::memory_order_release );
487 //CDS_COMPILER_RW_BARRIER;
491 void clear() CDS_NOEXCEPT
493 assert( m_pGuard != nullptr );
494 m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
495 CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
498 /// Guards pointer \p p
499 template <typename T>
500 T * operator =(T * p) CDS_NOEXCEPT
502 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
// Clears the guard when assigned nullptr.
507 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
514 public: // for ThreadGC.
516 GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
517 the compiler produces error:
'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
518 despite the fact that ThreadGC is declared as friend for guard class.
519 We would not like to declare the m_pGuard member as a public one.
520 Therefore, we have to add set_guard/get_guard public functions
523 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
525 assert( m_pGuard == nullptr );
529 /// Get current guard data
530 details::guard_data * get_guard() CDS_NOEXCEPT
534 /// Get current guard data
535 details::guard_data * get_guard() const CDS_NOEXCEPT
545 This class represents auto guard: ctor allocates a guard from guard pool,
546 dtor returns the guard back to the pool of free guard.
548 class Guard: public details::guard
551 typedef details::guard base_class;
552 friend class ThreadGC;
// Reference to the owning thread's ThreadGC; the dtor uses it to return the guard.
555 ThreadGC& m_gc ; ///< ThreadGC object of current thread
557 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
558 Guard( ThreadGC& gc ) CDS_NOEXCEPT;
560 /// Returns guard allocated back to pool of free guards
561 ~Guard() CDS_NOEXCEPT; // inline after GarbageCollector
563 /// Returns DHP GC object
564 ThreadGC& getGC() CDS_NOEXCEPT
569 /// Guards pointer \p p
570 template <typename T>
571 T * operator =(T * p) CDS_NOEXCEPT
573 return base_class::operator =<T>( p );
// Clears the guard via the base-class nullptr assignment.
577 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
579 return base_class::operator =(nullptr);
586 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
587 dtor returns the guards allocated back to the pool.
589 template <size_t Count>
// Fixed-size array of guards bulk-allocated from the thread's ThreadGC.
592 details::guard m_arr[Count] ; ///< array of guard
593 ThreadGC& m_gc ; ///< ThreadGC object of current thread
594 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
597 /// Rebind array for other size \p OtherCount
598 template <size_t OtherCount>
600 typedef GuardArray<OtherCount> other ; ///< rebinding result
604 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
605 GuardArray( ThreadGC& gc ) CDS_NOEXCEPT; // inline below
607 /// The object is not default-constructible
608 GuardArray() = delete;
610 /// The object is not copy-constructible
611 GuardArray( GuardArray const& ) = delete;
613 /// Returns guards allocated back to pool
614 ~GuardArray() CDS_NOEXCEPT; // inline below
616 /// Returns the capacity of array
617 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
622 /// Returns DHP ThreadGC object
623 ThreadGC& getGC() CDS_NOEXCEPT
628 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
629 details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
631 assert( nIndex < capacity() );
632 return m_arr[nIndex];
635 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
636 const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
638 assert( nIndex < capacity() );
639 return m_arr[nIndex];
642 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
643 template <typename T>
644 void set( size_t nIndex, T * p ) CDS_NOEXCEPT
646 assert( nIndex < capacity() );
647 m_arr[nIndex].set( p );
650 /// Clears (sets to \p nullptr) the guard \p nIndex
651 void clear( size_t nIndex ) CDS_NOEXCEPT
653 assert( nIndex < capacity() );
654 m_arr[nIndex].clear();
657 /// Clears all guards in the array
658 void clearAll() CDS_NOEXCEPT
660 for ( size_t i = 0; i < capacity(); ++i )
665 /// Memory manager (Garbage collector)
666 class CDS_EXPORT_API GarbageCollector
// Process-wide DHP singleton: owns the guard pool, the retired-pointer pool
// and the retired-pointer buffer. Created/destroyed via Construct()/Destruct().
670 friend class ThreadGC;
672 /// Internal GC statistics
675 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
676 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
680 , m_nFreeGuardCount(0)
686 /// Exception "No GarbageCollector object is created"
687 CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );
689 /// Internal GC statistics
// Plain (non-atomic) snapshot of the atomic internal_stat counters.
692 size_t m_nGuardCount ; ///< Total guard count
693 size_t m_nFreeGuardCount ; ///< Count of free guard
698 , m_nFreeGuardCount(0)
// Copies the atomic counters into this snapshot with relaxed loads.
701 InternalState& operator =( internal_stat const& s )
703 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
704 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
712 static GarbageCollector * m_pManager ; ///< GC global instance
714 details::guard_allocator<> m_GuardPool ; ///< Guard pool
715 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
716 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
718 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call \p scan()
719 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
721 internal_stat m_stat ; ///< Internal statistics
722 bool m_bStatEnabled ; ///< Internal Statistics enabled
725 /// Initializes DHP memory manager singleton
727 This member function creates and initializes DHP global object.
728 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
729 this member function is called in the \p main() function. See cds::gc::dhp for example.
730 After calling of this function you may use CDS data structures based on cds::gc::DHP.
733 \li \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
734 the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
735 If \p nLiberateThreshold <= 1, \p scan() would called after each \ref dhp_gc_retirePtr "retirePtr" call.
736 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
737 is initialized the GC allocates local guard pool for the thread from common guard pool.
738 By perforce the local thread's guard pool is grown automatically from common pool.
739 When the thread terminated its guard pool is backed to common GC's pool.
742 static void CDS_STDCALL Construct(
743 size_t nLiberateThreshold = 1024
744 , size_t nInitialThreadGuardCount = 8
747 /// Destroys DHP memory manager
749 The member function destroys DHP global object. After calling of this function you may \b NOT
750 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
751 at the end of your \p main(). See cds::gc::dhp for example.
753 static void CDS_STDCALL Destruct();
755 /// Returns pointer to GarbageCollector instance
757 If DHP GC is not initialized, \p DHPManagerEmpty exception is thrown
759 static GarbageCollector& instance()
761 if ( m_pManager == nullptr )
762 throw DHPManagerEmpty();
766 /// Checks if global GC object is constructed and may be used
767 static bool isUsed() CDS_NOEXCEPT
769 return m_pManager != nullptr;
774 /// Internal interface
776 /// Allocates a guard
777 details::guard_data * allocGuard()
779 return m_GuardPool.alloc();
782 /// Frees guard \p g for reusing in future
783 void freeGuard(details::guard_data * pGuard )
785 m_GuardPool.free( pGuard );
788 /// Allocates guard list for a thread.
789 details::guard_data * allocGuardList( size_t nCount )
791 return m_GuardPool.allocList( nCount );
794 /// Frees thread's guard list pointed by \p pList
795 void freeGuardList( details::guard_data * pList )
797 m_GuardPool.freeList( pList );
800 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
801 /**@anchor dhp_gc_retirePtr
803 template <typename T>
804 void retirePtr( T * p, void (* pFunc)(T *) )
806 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
809 /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
810 void retirePtr( retired_ptr const& p )
// NOTE(review): the branch body is elided in this listing; per the
// \ref dhp_gc_liberate docs above, scan() is expected to be called here
// when the buffer reaches the liberate threshold — confirm against upstream.
812 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
817 /// Liberate function
818 /** @anchor dhp_gc_liberate
819 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
820 trapped by any guard.
826 /// Get internal statistics
827 InternalState& getInternalState(InternalState& stat) const
829 return stat = m_stat;
832 /// Checks if internal statistics enabled
833 bool isStatisticsEnabled() const
835 return m_bStatEnabled;
838 /// Enables/disables internal statistics
839 bool enableStatistics( bool bEnable )
// Returns the previous value of the flag.
841 bool bEnabled = m_bStatEnabled;
842 m_bStatEnabled = bEnable;
848 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
855 To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
856 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
857 on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
858 \ref cds_threading "cds::threading::Manager::detachThread()".
860 The ThreadGC object maintains two list:
861 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
862 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
863 Free guard list is a subset of thread guard list.
// Thread-local facade over the global GarbageCollector. Not thread-safe by
// itself — each instance must be used only by its owning thread.
867 GarbageCollector& m_gc ; ///< reference to GC singleton
868 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
869 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
872 /// Default constructor
874 : m_gc( GarbageCollector::instance() )
879 /// The object is not copy-constructible
880 ThreadGC( ThreadGC const& ) = delete;
882 /// Dtor calls fini()
888 /// Initialization. Repeat call is available
// Allocates the initial thread-local guard list from the global pool.
893 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
897 /// Finalization. Repeat call is available
// Returns the whole thread-local guard list back to the global pool.
901 m_gc.freeGuardList( m_pList );
908 /// Initializes guard \p g
909 void allocGuard( Guard& g )
911 assert( m_pList != nullptr );
// Fast path: reuse a guard from the thread-local free list; otherwise take a
// fresh guard from the global pool and link it into the thread list.
913 g.m_pGuard = m_pFree;
914 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
917 g.m_pGuard = m_gc.allocGuard();
918 g.m_pGuard->pThreadNext = m_pList;
919 m_pList = g.m_pGuard;
// Frees guard \p g: clears the guarded pointer and pushes the guard onto the
// thread-local free list.
924 void freeGuard( Guard& g )
926 assert( m_pList != nullptr );
927 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
928 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
929 m_pFree = g.m_pGuard;
932 /// Initializes guard array \p arr
933 template <size_t Count>
934 void allocGuard( GuardArray<Count>& arr )
936 assert( m_pList != nullptr );
// First drain the thread-local free list, then top up from the global pool.
939 while ( m_pFree && nCount < Count ) {
940 arr[nCount].set_guard( m_pFree );
941 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
945 while ( nCount < Count ) {
946 details::guard& g = arr[nCount++];
947 g.set_guard( m_gc.allocGuard() );
948 g.get_guard()->pThreadNext = m_pList;
949 m_pList = g.get_guard();
953 /// Frees guard array \p arr
954 template <size_t Count>
955 void freeGuard( GuardArray<Count>& arr )
957 assert( m_pList != nullptr );
// Clear all guards, chain them via pNextFree, and prepend the whole chain to
// the thread-local free list in one splice.
959 details::guard_data * pGuard;
960 for ( size_t i = 0; i < Count - 1; ++i ) {
961 pGuard = arr[i].get_guard();
962 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
963 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
965 pGuard = arr[Count-1].get_guard();
966 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
967 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
968 m_pFree = arr[0].get_guard();
971 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
972 template <typename T>
973 void retirePtr( T * p, void (* pFunc)(T *) )
975 m_gc.retirePtr( p, pFunc );
987 //////////////////////////////////////////////////////////
// Guard ctor: acquires a guard_data from the current thread's ThreadGC.
990 inline Guard::Guard(ThreadGC& gc)
993 getGC().allocGuard( *this );
// Guard dtor: returns the guard_data to the thread's free list.
995 inline Guard::~Guard()
997 getGC().freeGuard( *this );
1000 template <size_t Count>
// GuardArray ctor: bulk-allocates Count guards from the thread's ThreadGC.
1001 inline GuardArray<Count>::GuardArray( ThreadGC& gc )
1004 getGC().allocGuard( *this );
1006 template <size_t Count>
// GuardArray dtor: returns all Count guards to the thread's free list.
1007 inline GuardArray<Count>::~GuardArray()
1009 getGC().freeGuard( *this );
1013 }} // namespace cds::gc
1015 #if CDS_COMPILER == CDS_COMPILER_MSVC
1016 # pragma warning(pop)
1019 #endif // #ifndef __CDS_GC_DETAILS_DHP_H