/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_DETAILS_DHP_H
#define CDSLIB_GC_DETAILS_DHP_H

#include <mutex>        // std::unique_lock
#include <stdexcept>    // std::runtime_error (GarbageCollector::not_initialized)
#include <utility>      // std::pair (retired_ptr_buffer::privatize_result)

#include <cds/algo/atomic.h>
#include <cds/algo/int_algo.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/sync/spinlock.h>
42 #if CDS_COMPILER == CDS_COMPILER_MSVC
43 # pragma warning(push)
44 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
48 namespace cds { namespace gc {
50 /// Dynamic Hazard Pointer reclamation schema
52 The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
53 Use cds::gc::DHP class in your code.
55 Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of DHP schema is
56 GC class and its nested classes. Before use any DHP-related class you must initialize DHP garbage collector
57 by contructing cds::gc::DHP object in beginning of your main().
58 See cds::gc::DHP class for explanation.
60 \par Implementation issues
61 The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
62 It seems that this solution should not introduce significant performance bottleneck, because each thread has its own set
63 of guards allocated from the global list of free guards and the access to the global list is occurred only when
64 all thread's guard is busy. In this case the thread allocates a next block of guards from the global list.
65 Guards allocated for the thread is push back to the global list only when the thread terminates.
69 // Forward declarations
71 template <size_t Count> class GuardArray;
73 class GarbageCollector;
75 /// Retired pointer type
76 typedef cds::gc::details::retired_ptr retired_ptr;
78 using cds::gc::details::free_retired_ptr_func;
80 /// Details of Dynamic Hazard Pointer algorithm
83 // Forward declaration
86 /// Retired pointer buffer node
87 struct retired_ptr_node {
88 retired_ptr m_ptr ; ///< retired pointer
89 atomics::atomic<retired_ptr_node *> m_pNext ; ///< next retired pointer in buffer
90 atomics::atomic<retired_ptr_node *> m_pNextFree ; ///< next item in free list of \p retired_ptr_node
93 /// Internal guard representation
95 typedef void * guarded_ptr; ///< type of value guarded
97 atomics::atomic<guarded_ptr> pPost; ///< pointer guarded
98 atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
99 atomics::atomic<guard_data *> pNextFree; ///< pointer to the next item in global or thread-local free-list
101 guard_data * pThreadNext; ///< next item of thread's local list of guards
103 guard_data() CDS_NOEXCEPT
105 , pGlobalNext( nullptr )
106 , pNextFree( nullptr )
107 , pThreadNext( nullptr )
110 void init() CDS_NOEXCEPT
112 pPost.store( nullptr, atomics::memory_order_relaxed );
115 /// Checks if the guard is free, that is, it does not contain any pointer guarded
116 bool isFree() const CDS_NOEXCEPT
118 return pPost.load( atomics::memory_order_acquire ) == nullptr;
123 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
124 class guard_allocator
126 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
128 atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
129 atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
130 cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
133 Unfortunately, access to the list of free guard is lock-based.
134 Lock-free manipulations with guard free-list are ABA-prone.
135 TODO: working with m_FreeGuardList in lock-free manner.
139 /// Allocates new guard from the heap. The function uses aligned allocator
140 guard_data * allocNew()
142 //TODO: the allocator should make block allocation
144 details::guard_data * pGuard = m_GuardAllocator.New();
146 // Link guard to the list
147 // m_GuardList is an accumulating list and it cannot support concurrent deletion,
148 // so, ABA problem is impossible for it
149 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
151 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
152 // pHead is changed by compare_exchange_weak
153 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
161 guard_allocator() CDS_NOEXCEPT
162 : m_GuardList( nullptr )
163 , m_FreeGuardList( nullptr )
170 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
171 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
172 m_GuardAllocator.Delete( pData );
176 /// Allocates a guard from free list or from heap if free list is empty
179 // Try to pop a guard from free-list
180 details::guard_data * pGuard;
183 std::unique_lock<cds::sync::spin> al( m_freeListLock );
184 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
186 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
195 /// Frees guard \p pGuard
197 The function places the guard \p pGuard into free-list
199 void free( guard_data * pGuard ) CDS_NOEXCEPT
201 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
203 std::unique_lock<cds::sync::spin> al( m_freeListLock );
204 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
205 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
208 /// Allocates list of guard
210 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
212 cds::gc::dhp::ThreadGC supporting method
214 guard_data * allocList( size_t nCount )
216 assert( nCount != 0 );
224 // The guard list allocated is private for the thread,
225 // so, we can use relaxed memory order
227 guard_data * p = alloc();
228 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
232 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
237 /// Frees list of guards
239 The list \p pList is linked by guard's \p pThreadNext field.
241 cds::gc::dhp::ThreadGC supporting method
243 void freeList( guard_data * pList ) CDS_NOEXCEPT
245 assert( pList != nullptr );
247 guard_data * pLast = pList;
248 while ( pLast->pThreadNext ) {
249 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
251 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
255 std::unique_lock<cds::sync::spin> al( m_freeListLock );
256 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
257 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
260 /// Returns the list's head of guards allocated
261 guard_data * begin() CDS_NOEXCEPT
263 return m_GuardList.load(atomics::memory_order_acquire);
267 /// Retired pointer buffer
269 The buffer of retired nodes ready for liberating.
270 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
273 class retired_ptr_buffer
275 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
276 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
279 retired_ptr_buffer() CDS_NOEXCEPT
284 ~retired_ptr_buffer() CDS_NOEXCEPT
286 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
289 /// Pushes new node into the buffer. Returns current buffer size
290 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
292 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
294 node.m_pNext.store( pHead, atomics::memory_order_relaxed );
295 // pHead is changed by compare_exchange_weak
296 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
298 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
301 /// Pushes [pFirst, pLast] list linked by pNext field.
302 size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
307 retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
309 pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
310 // pHead is changed by compare_exchange_weak
311 } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
313 return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
316 /// Result of \ref dhp_gc_privatize "privatize" function.
318 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
320 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
322 /// Gets current list of retired pointer and clears the list
323 /**@anchor dhp_gc_privatize
325 privatize_result privatize() CDS_NOEXCEPT
327 privatize_result res;
329 // Item counter is needed only as a threshold for \p scan() function
330 // So, we may clear the item counter without synchronization with m_pHead
331 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
333 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
338 /// Returns current size of buffer (approximate)
339 size_t size() const CDS_NOEXCEPT
341 return m_nItemCount.load(atomics::memory_order_relaxed);
345 /// Pool of retired pointers
347 The class acts as an allocator of retired node.
348 Retired pointers are linked in the lock-free list.
350 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
351 class retired_ptr_pool {
353 typedef retired_ptr_node item;
355 /// Count of items in block
356 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
360 atomics::atomic<block *> pNext; ///< next block
361 item items[m_nItemPerBlock]; ///< item array
364 atomics::atomic<block *> m_pBlockListHead; ///< head of of allocated block list
366 // To solve ABA problem we use epoch-based approach
367 unsigned int const m_nEpochBitmask; ///< Epoch bitmask (log2( m_nEpochCount))
368 atomics::atomic<unsigned int> m_nCurEpoch; ///< Current epoch
369 atomics::atomic<item *>* m_pEpochFree; ///< List of free item per epoch
370 atomics::atomic<item *> m_pGlobalFreeHead; ///< Head of unallocated item list
372 typedef cds::details::Allocator< block, Alloc > block_allocator;
373 typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;
378 // allocate new block
379 block * pNew = block_allocator().New();
381 // link items within the block
382 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
383 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
384 pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
385 CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
388 // links new block to the block list
390 block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
392 pNew->pNext.store( pHead, atomics::memory_order_relaxed );
393 // pHead is changed by compare_exchange_weak
394 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
397 // links block's items to the free list
399 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
401 pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
402 // pHead is changed by compare_exchange_weak
403 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
407 unsigned int current_epoch() const CDS_NOEXCEPT
409 return m_nCurEpoch.load(atomics::memory_order_acquire) & m_nEpochBitmask;
412 unsigned int next_epoch() const CDS_NOEXCEPT
414 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & m_nEpochBitmask;
418 retired_ptr_pool( unsigned int nEpochCount = 8 )
419 : m_pBlockListHead( nullptr )
420 , m_nEpochBitmask( static_cast<unsigned int>(beans::ceil2(nEpochCount)) - 1 )
422 , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1))
423 , m_pGlobalFreeHead( nullptr )
427 for (unsigned int i = 0; i <= m_nEpochBitmask; ++i )
428 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
437 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
438 p = pBlock->pNext.load( atomics::memory_order_relaxed );
442 epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
445 /// Increments current epoch
446 void inc_epoch() CDS_NOEXCEPT
448 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
451 /// Allocates the new retired pointer
452 retired_ptr_node& alloc()
457 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
460 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
461 pItem->m_pNextFree.load(atomics::memory_order_acquire),
462 atomics::memory_order_acquire, atomics::memory_order_relaxed ))
468 // Epoch free list is empty
469 // Alloc from global free list
471 pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
477 // pItem is changed by compare_exchange_weak
478 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
479 pItem->m_pNextFree.load(atomics::memory_order_acquire),
480 atomics::memory_order_acquire, atomics::memory_order_relaxed ));
483 CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
487 /// Allocates and initializes new retired pointer
488 retired_ptr_node& alloc( const retired_ptr& p )
490 retired_ptr_node& node = alloc();
495 /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
497 The list is linked on the m_pNextFree field
499 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
501 assert( pHead != nullptr );
502 assert( pTail != nullptr );
507 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
508 pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
509 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
513 /// Uninitialized guard
516 friend class dhp::ThreadGC;
518 details::guard_data * m_pGuard ; ///< Pointer to guard data
521 /// Initialize empty guard.
522 CDS_CONSTEXPR guard() CDS_NOEXCEPT
523 : m_pGuard( nullptr )
526 /// Copy-ctor is disabled
527 guard( guard const& ) = delete;
529 /// Move-ctor is disabled
530 guard( guard&& ) = delete;
532 /// Object destructor, does nothing
533 ~guard() CDS_NOEXCEPT
536 /// Get current guarded pointer
537 void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
539 assert( m_pGuard != nullptr );
540 return m_pGuard->pPost.load( order );
543 /// Guards pointer \p p
544 void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
546 assert( m_pGuard != nullptr );
547 m_pGuard->pPost.store( p, order );
551 void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
553 assert( m_pGuard != nullptr );
554 m_pGuard->pPost.store( nullptr, order );
557 /// Guards pointer \p p
558 template <typename T>
559 T * operator =(T * p) CDS_NOEXCEPT
561 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
565 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
571 public: // for ThreadGC.
573 GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
574 the compiler produces error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
575 despite the fact that ThreadGC is declared as friend for guard class.
576 Therefore, we have to add set_guard/get_guard public functions
579 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
581 assert( m_pGuard == nullptr );
585 /// Get current guard data
586 details::guard_data * get_guard() CDS_NOEXCEPT
590 /// Get current guard data
591 details::guard_data * get_guard() const CDS_NOEXCEPT
596 details::guard_data * release_guard() CDS_NOEXCEPT
598 details::guard_data * p = m_pGuard;
603 bool is_initialized() const
605 return m_pGuard != nullptr;
609 } // namespace details
613 This class represents auto guard: ctor allocates a guard from guard pool,
614 dtor returns the guard back to the pool of free guard.
616 class Guard: public details::guard
618 typedef details::guard base_class;
619 friend class ThreadGC;
621 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
622 Guard(); // inline in dhp_impl.h
624 /// Returns guard allocated back to pool of free guards
625 ~Guard(); // inline in dhp_impl.h
627 /// Guards pointer \p p
628 template <typename T>
629 T * operator =(T * p) CDS_NOEXCEPT
631 return base_class::operator =<T>( p );
634 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
636 return base_class::operator =(nullptr);
642 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
643 dtor returns the guards allocated back to the pool.
645 template <size_t Count>
648 details::guard m_arr[Count] ; ///< array of guard
649 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
652 /// Rebind array for other size \p OtherCount
653 template <size_t OtherCount>
655 typedef GuardArray<OtherCount> other ; ///< rebinding result
659 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
660 GuardArray(); // inline in dhp_impl.h
662 /// The object is not copy-constructible
663 GuardArray( GuardArray const& ) = delete;
665 /// The object is not move-constructible
666 GuardArray( GuardArray&& ) = delete;
668 /// Returns guards allocated back to pool
669 ~GuardArray(); // inline in dh_impl.h
671 /// Returns the capacity of array
672 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
677 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
678 details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
680 assert( nIndex < capacity() );
681 return m_arr[nIndex];
684 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
685 const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
687 assert( nIndex < capacity() );
688 return m_arr[nIndex];
691 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
692 template <typename T>
693 void set( size_t nIndex, T * p ) CDS_NOEXCEPT
695 assert( nIndex < capacity() );
696 m_arr[nIndex].set( p );
699 /// Clears (sets to \p nullptr) the guard \p nIndex
700 void clear( size_t nIndex ) CDS_NOEXCEPT
702 assert( nIndex < capacity() );
703 m_arr[nIndex].clear();
706 /// Clears all guards in the array
707 void clearAll() CDS_NOEXCEPT
709 for ( size_t i = 0; i < capacity(); ++i )
714 /// Memory manager (Garbage collector)
715 class CDS_EXPORT_API GarbageCollector
718 friend class ThreadGC;
720 /// Internal GC statistics
723 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
724 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
728 , m_nFreeGuardCount(0)
733 /// Exception "No GarbageCollector object is created"
734 class not_initialized : public std::runtime_error
739 : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
744 /// Internal GC statistics
747 size_t m_nGuardCount ; ///< Total guard count
748 size_t m_nFreeGuardCount ; ///< Count of free guard
753 , m_nFreeGuardCount(0)
756 InternalState& operator =( internal_stat const& s )
758 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
759 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
767 static GarbageCollector * m_pManager ; ///< GC global instance
769 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call \p scan()
770 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
772 details::guard_allocator<> m_GuardPool ; ///< Guard pool
773 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
774 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
776 internal_stat m_stat ; ///< Internal statistics
777 bool m_bStatEnabled ; ///< Internal Statistics enabled
780 /// Initializes DHP memory manager singleton
782 This member function creates and initializes DHP global object.
783 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
784 this member function is called in the \p main() function. See cds::gc::dhp for example.
785 After calling of this function you may use CDS data structures based on cds::gc::DHP.
788 - \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
789 the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
790 If \p nLiberateThreshold <= 1, \p scan() would called after each \ref dhp_gc_retirePtr "retirePtr" call.
791 - \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
792 is initialized the GC allocates local guard pool for the thread from common guard pool.
793 By perforce the local thread's guard pool is grown automatically from common pool.
794 When the thread terminated its guard pool is backed to common GC's pool.
795 - \p nEpochCount: internally, DHP memory manager uses epoch-based schema to solve
796 ABA problem for internal data. \p nEpochCount specifies the epoch count,
797 i.e. the count of simultaneously working threads that remove the elements
798 of DHP-based concurrent data structure. Default value is 16.
800 static void CDS_STDCALL Construct(
801 size_t nLiberateThreshold = 1024
802 , size_t nInitialThreadGuardCount = 8
803 , size_t nEpochCount = 16
806 /// Destroys DHP memory manager
808 The member function destroys DHP global object. After calling of this function you may \b NOT
809 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
810 at the end of your \p main(). See cds::gc::dhp for example.
812 static void CDS_STDCALL Destruct();
814 /// Returns pointer to GarbageCollector instance
816 If DHP GC is not initialized, \p not_initialized exception is thrown
818 static GarbageCollector& instance()
820 if ( m_pManager == nullptr )
821 throw not_initialized();
825 /// Checks if global GC object is constructed and may be used
826 static bool isUsed() CDS_NOEXCEPT
828 return m_pManager != nullptr;
833 /// Internal interface
835 /// Allocates a guard
836 details::guard_data * allocGuard()
838 return m_GuardPool.alloc();
841 /// Frees guard \p g for reusing in future
842 void freeGuard(details::guard_data * pGuard )
844 m_GuardPool.free( pGuard );
847 /// Allocates guard list for a thread.
848 details::guard_data * allocGuardList( size_t nCount )
850 return m_GuardPool.allocList( nCount );
853 /// Frees thread's guard list pointed by \p pList
854 void freeGuardList( details::guard_data * pList )
856 m_GuardPool.freeList( pList );
859 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
860 /**@anchor dhp_gc_retirePtr
862 template <typename T>
863 void retirePtr( T * p, void (* pFunc)(T *) )
865 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
868 /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
869 void retirePtr( retired_ptr const& p )
871 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
876 /// Liberate function
877 /** @anchor dhp_gc_liberate
878 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
879 trapped by any guard.
885 /// Get internal statistics
886 InternalState& getInternalState(InternalState& stat) const
888 return stat = m_stat;
891 /// Checks if internal statistics enabled
892 bool isStatisticsEnabled() const
894 return m_bStatEnabled;
897 /// Enables/disables internal statistics
898 bool enableStatistics( bool bEnable )
900 bool bEnabled = m_bStatEnabled;
901 m_bStatEnabled = bEnable;
906 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
912 To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
913 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
914 on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
915 \ref cds_threading "cds::threading::Manager::detachThread()".
917 The ThreadGC object maintains two list:
918 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
919 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
920 Free guard list is a subset of thread guard list.
924 GarbageCollector& m_gc ; ///< reference to GC singleton
925 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
926 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
929 /// Default constructor
931 : m_gc( GarbageCollector::instance() )
936 /// The object is not copy-constructible
937 ThreadGC( ThreadGC const& ) = delete;
939 /// Dtor calls fini()
945 /// Initialization. Repeat call is available
950 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
954 /// Finalization. Repeat call is available
958 m_gc.freeGuardList( m_pList );
965 /// Initializes guard \p g
966 void allocGuard( dhp::details::guard& g )
968 assert( m_pList != nullptr );
971 g.m_pGuard = m_pFree;
972 m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
975 g.m_pGuard = m_gc.allocGuard();
976 g.m_pGuard->pThreadNext = m_pList;
977 m_pList = g.m_pGuard;
983 void freeGuard( dhp::details::guard& g )
985 assert( m_pList != nullptr );
987 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
988 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
989 m_pFree = g.m_pGuard;
990 g.m_pGuard = nullptr;
994 /// Initializes guard array \p arr
995 template <size_t Count>
996 void allocGuard( GuardArray<Count>& arr )
998 assert( m_pList != nullptr );
1001 while ( m_pFree && nCount < Count ) {
1002 arr[nCount].set_guard( m_pFree );
1003 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
1007 while ( nCount < Count ) {
1008 details::guard& g = arr[nCount++];
1009 g.set_guard( m_gc.allocGuard() );
1010 g.get_guard()->pThreadNext = m_pList;
1011 m_pList = g.get_guard();
1015 /// Frees guard array \p arr
1016 template <size_t Count>
1017 void freeGuard( GuardArray<Count>& arr )
1019 assert( m_pList != nullptr );
1021 details::guard_data * pGuard;
1022 for ( size_t i = 0; i < Count - 1; ++i ) {
1023 pGuard = arr[i].get_guard();
1024 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
1025 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
1027 pGuard = arr[Count-1].get_guard();
1028 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
1029 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
1030 m_pFree = arr[0].get_guard();
1033 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
1034 template <typename T>
1035 void retirePtr( T * p, void (* pFunc)(T *) )
1037 m_gc.retirePtr( p, pFunc );
1040 /// Run retiring cycle
1047 }} // namespace cds::gc
1050 #if CDS_COMPILER == CDS_COMPILER_MSVC
1051 # pragma warning(pop)
1054 #endif // #ifndef CDSLIB_GC_DETAILS_DHP_H