3 #ifndef __CDS_GC_HRC_DECL_H
4 #define __CDS_GC_HRC_DECL_H
6 #include <cds/gc/hrc/hrc.h>
7 #include <cds/details/marked_ptr.h>
9 namespace cds { namespace gc {
11 /// Gidenstam's garbage collector
12 /** @ingroup cds_garbage_collector
13 @headerfile cds/gc/hrc.h
15 This class is a wrapper for Gidenstam's memory reclamation schema (HRC - Hazard pointer + Reference Counting)
16 internal implementation.
19 - [2006] A.Gidenstam "Algorithms for synchronization and consistency
20 in concurrent system services", Chapter 5 "Lock-Free Memory Reclamation"
21 Thesis for the degree of Doctor of Philosophy
22 - [2005] Anders Gidenstam, Marina Papatriantafilou and Philippas Tsigas "Allocating
23 memory in a lock-free manner", Proceedings of the 13th Annual European
24 Symposium on Algorithms (ESA 2005), Lecture Notes in Computer
25 Science Vol. 3669, pages 229&#8211;242, Springer-Verlag, 2005
27 Note that HRC schema does not support cyclic references that significantly limits the applicability of this GC.
30 In your \p main function you declare an object of class cds::gc::HRC. This declaration
31 initializes internal hrc::GarbageCollector singleton.
33 #include <cds/init.h> // for cds::Initialize and cds::Terminate
34 #include <cds/gc/hrc.h>
36 int main(int argc, char** argv)
42 // Initialize HRC singleton
54 Each thread that uses cds::gc::HRC -based containers must be attached to HRC
55 singleton. To make the attachment you should declare an object of class HRC::thread_gc:
57 #include <cds/gc/hrc.h>
59 int myThreadEntryPoint()
61 // Attach the thread to HRC singleton
62 cds::gc::HRC::thread_gc myThreadGC();
67 // The destructor of myThreadGC object detaches the thread from HRC singleton
71 In some cases, you should work in an external thread. For example, your application
72 is a plug-in for a server that calls your code in threads that have been created by the server.
73 In this case, you may use persistent mode of HRC::thread_gc. In this mode, the thread attaches
74 to the HRC singleton only if it is not yet attached, and never detaches it:
76 #include <cds/gc/hrc.h>
78 int myThreadEntryPoint()
80 // Attach the thread in persistent mode
81 cds::gc::HRC::thread_gc myThreadGC( true );
86 // The destructor of myThreadGC object does NOT detach the thread from HRC singleton
95 /// Thread GC implementation for internal usage
96 typedef hrc::ThreadGC thread_gc_impl;
98 /// Wrapper for hrc::ThreadGC class
100 @headerfile cds/gc/hrc.h
101 This class performs automatically attaching/detaching Gidenstam's GC
102 for the current thread.
104 class thread_gc: public thread_gc_impl
112 The constructor attaches the current thread to the Gidenstam's GC
113 if it is not yet attached.
114 The \p bPersistent parameter specifies attachment persistence:
115 - \p true - the class destructor will not detach the thread from Gidenstam's GC.
116 - \p false (default) - the class destructor will detach the thread from Gidenstam's GC.
119 bool bPersistent = false
120 ) ; // inline in hrc_impl.h
124 If the object has been created in persistent mode, the destructor does nothing.
125 Otherwise it detaches the current thread from HRC GC.
127 ~thread_gc() ; // inline in hrc_impl.h
130 ///@anchor hrc_gc_HRC_container_node Base for container node
131 typedef hrc::ContainerNode container_node;
133 /// Native hazard pointer type
134 typedef container_node * guarded_pointer;
138 @headerfile cds/gc/hrc.h
140 template <typename T>
141 class atomic_ref: protected atomics::atomic<T *>
144 typedef atomics::atomic<T *> base_class;
148 # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
149 atomic_ref() = default;
151 atomic_ref() CDS_NOEXCEPT
155 explicit CDS_CONSTEXPR atomic_ref(T * p) CDS_NOEXCEPT
160 /// Read reference value
161 T * load( atomics::memory_order order ) const CDS_NOEXCEPT
163 return base_class::load( order );
166 T * load( atomics::memory_order order ) const volatile CDS_NOEXCEPT
168 return base_class::load( order );
172 /// Store new value to reference
173 void store( T * pNew, atomics::memory_order order ) CDS_NOEXCEPT
175 before_store( pNew );
176 T * pOld = base_class::exchange( pNew, order );
177 after_store( pOld, pNew );
180 void store( T * pNew, atomics::memory_order order ) volatile CDS_NOEXCEPT
182 before_store( pNew );
183 T * pOld = base_class::exchange( pNew, order );
184 after_store( pOld, pNew );
188 /// Updates atomic reference from current value \p pOld to new value \p pNew (strong CAS)
190 May be used when concurrent updates are possible
192 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
194 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
197 bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
198 after_cas( bSuccess, pOld, pNew );
202 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
205 bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
206 after_cas( bSuccess, pOld, pNew );
209 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
211 return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
213 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
215 return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
219 /// Updates atomic reference from current value \p pOld to new value \p pNew (weak CAS)
221 May be used when concurrent updates are possible
223 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
225 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
228 bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
229 after_cas( bSuccess, pOld, pNew );
233 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
236 bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
237 after_cas( bSuccess, pOld, pNew );
240 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
242 return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
244 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
246 return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
252 static void before_store( T * pNew ) CDS_NOEXCEPT
257 static void after_store( T * pOld, T * pNew ) CDS_NOEXCEPT
260 pNew->m_bTrace.store( false, atomics::memory_order_release );
264 static void before_cas( T * p ) CDS_NOEXCEPT
268 p->m_bTrace.store( false, atomics::memory_order_release );
271 static void after_cas( bool bSuccess, T * pOld, T * pNew ) CDS_NOEXCEPT
285 /// Atomic marked pointer
287 @headerfile cds/gc/hrc.h
289 template <typename MarkedPtr>
290 class atomic_marked_ptr
293 atomics::atomic< MarkedPtr > m_a;
296 /// Marked pointer type
297 typedef MarkedPtr marked_ptr;
300 atomic_marked_ptr() CDS_NOEXCEPT
301 : m_a( marked_ptr() )
304 explicit CDS_CONSTEXPR atomic_marked_ptr( typename marked_ptr::value_type * p ) CDS_NOEXCEPT
305 : m_a( marked_ptr(p) )
308 atomic_marked_ptr( typename marked_ptr::value_type * ptr, int nMask ) CDS_NOEXCEPT
309 : m_a( marked_ptr(ptr, nMask) )
312 explicit atomic_marked_ptr( marked_ptr const& ptr ) CDS_NOEXCEPT
318 /// Read reference value
319 marked_ptr load(atomics::memory_order order) const CDS_NOEXCEPT
321 return m_a.load(order);
324 /// Store new value to reference
325 void store( marked_ptr pNew, atomics::memory_order order ) CDS_NOEXCEPT
327 before_store( pNew.ptr() );
328 marked_ptr pOld = m_a.exchange( pNew, order );
329 after_store( pOld.ptr(), pNew.ptr() );
332 /// Store new value to reference
333 void store( typename marked_ptr::pointer_type pNew, atomics::memory_order order ) CDS_NOEXCEPT
335 before_store( pNew );
336 marked_ptr pOld = m_a.exchange( marked_ptr(pNew), order );
337 after_store( pOld.ptr(), pNew );
340 /// Updates atomic reference from current value \p pOld to new value \p pNew (weak CAS)
342 May be used when concurrent updates are possible
344 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
346 bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
348 before_cas( pNew.ptr() );
349 bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
350 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
354 bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
356 before_cas( pNew.ptr() );
357 bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success );
358 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
363 /// Updates atomic reference from current value \p pOld to new value \p pNew (strong CAS)
365 May be used when concurrent updates are possible
367 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
369 bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
372 before_cas( pNew.ptr() );
373 bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
374 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
378 bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
380 before_cas( pNew.ptr() );
381 bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success );
382 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
389 static void before_store( typename marked_ptr::pointer_type p ) CDS_NOEXCEPT
394 static void after_store( typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
397 pNew->m_bTrace.store( false, atomics::memory_order_release );
401 static void before_cas( typename marked_ptr::pointer_type p ) CDS_NOEXCEPT
405 p->m_bTrace.store( false, atomics::memory_order_release );
408 static void after_cas( bool bSuccess, typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
424 @headerfile cds/gc/hrc.h
425 This class is a wrapper for hrc::AutoHPGuard.
427 class Guard: public hrc::AutoHPGuard
430 typedef hrc::AutoHPGuard base_class;
434 /// Default constructor
435 Guard() ; // inline in hrc_impl.h
437 /// Protects atomic pointer
439 Return the value of \p toGuard
441 The function tries to load \p toGuard and to store it
442 to the HP slot repeatedly until the guard's value equals \p toGuard
444 template <typename T>
445 T * protect( atomic_ref<T> const& toGuard )
447 T * pCur = toGuard.load(atomics::memory_order_relaxed);
450 pRet = assign( pCur );
451 pCur = toGuard.load(atomics::memory_order_acquire);
452 } while ( pRet != pCur );
456 /// Protects a converted pointer of type \p atomic<T*>
458 Return the value of \p toGuard
460 The function tries to load \p toGuard and to store result of \p f functor
461 to the HP slot repeatedly until the guard's value equals \p toGuard.
463 The function is useful for intrusive containers when \p toGuard is a node pointer
464 that should be converted to a pointer to the value type before guarding.
465 The parameter \p f of type Func is a functor that makes this conversion:
468 value_type * operator()( T * p );
471 Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
473 template <typename T, class Func>
474 T * protect( atomic_ref<T> const& toGuard, Func f )
476 T * pCur = toGuard.load(atomics::memory_order_relaxed);
481 pCur = toGuard.load(atomics::memory_order_acquire);
482 } while ( pRet != pCur );
486 /// Protects a atomic marked reference \p link
488 Returns current value of \p link.
490 The function tries to load \p link and to store it
491 to the guard repeatedly until the guard's value equals \p link
493 template <typename T>
494 typename atomic_marked_ptr<T>::marked_ptr protect( atomic_marked_ptr<T> const& link )
496 typename atomic_marked_ptr<T>::marked_ptr p;
498 assign( ( p = link.load(atomics::memory_order_relaxed)).ptr() );
499 } while ( p != link.load(atomics::memory_order_acquire) );
503 /// Protects a atomic marked reference \p link
505 Returns current value of \p link.
507 The function tries to load \p link and to store it
508 to the guard repeatedly until the guard's value equals \p link
510 The function is useful for intrusive containers when \p link is a node pointer
511 that should be converted to a pointer to the value type before guarding.
512 The parameter \p f of type Func is a functor that makes this conversion:
515 value_type * operator()( T p );
518 Really, the result of <tt> f( link.load() ) </tt> is assigned to the hazard pointer.
520 template <typename T, typename Func>
521 typename atomic_marked_ptr<T>::marked_ptr protect( atomic_marked_ptr<T> const& link, Func f )
523 typename atomic_marked_ptr<T>::marked_ptr pCur;
525 pCur = link.load(atomics::memory_order_relaxed);
527 } while ( pCur != link.load(atomics::memory_order_acquire) );
531 /// Stores \p p to the guard
533 The function equals to a simple assignment, no loop is performed.
534 Can be used for a pointer that cannot be changed concurrently.
536 template <typename T>
539 return base_class::operator =(p);
542 /// Stores marked pointer \p p to the guard
544 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
545 Can be used for a marked pointer that cannot be changed concurrently.
547 template <typename T, int Bitmask>
548 T * assign( cds::details::marked_ptr<T, Bitmask> p )
550 return base_class::operator =( p.ptr() );
553 /// Copy from \p src guard to \p this guard
554 void copy( Guard const& src )
556 assign( src.get_native() );
559 /// Clear value of the guard
565 /// Get the value currently protected
566 template <typename T>
569 return static_cast<T *>( get_native());
572 /// Get native hazard pointer stored
573 guarded_pointer get_native() const
575 return base_class::get();
581 @headerfile cds/gc/hrc.h
582 This class is a wrapper for AutoHPArray template.
583 Template parameter \p Limit defines the size of HP array.
585 template <size_t Limit>
586 class GuardArray: public hrc::AutoHPArray<Limit>
589 typedef hrc::AutoHPArray<Limit> base_class;
592 /// Rebind array for other size \p OtherLimit
593 template <size_t OtherLimit>
595 typedef GuardArray<OtherLimit> other ; ///< rebinding result
600 GuardArray() ; // inline in hrc_impl.h
601 GuardArray( thread_gc_impl& threadGC )
602 : base_class( threadGC )
606 /// Protects an atomic reference \p link in slot \p nIndex
608 Returns current value of \p link.
610 The function tries to load \p pToGuard and to store it
611 to the slot \p nIndex repeatedly until the guard's value equals \p pToGuard
613 template <typename T>
614 T * protect( size_t nIndex, atomic_ref<T> const& link )
618 p = assign( nIndex, link.load(atomics::memory_order_relaxed) );
619 } while ( p != link.load(atomics::memory_order_acquire) );
623 /// Protects a atomic marked reference \p link in slot \p nIndex
625 Returns current value of \p link.
627 The function tries to load \p link and to store it
628 to the slot \p nIndex repeatedly until the guard's value equals \p link
630 template <typename T>
631 typename atomic_marked_ptr<T>::marked_ptr protect( size_t nIndex, atomic_marked_ptr<T> const& link )
633 typename atomic_marked_ptr<T>::marked_ptr p;
635 assign( nIndex, ( p = link.load(atomics::memory_order_relaxed)).ptr() );
636 } while ( p != link.load(atomics::memory_order_acquire) );
640 /// Protects a pointer of type \p atomic<T*>
642 Return the value of \p toGuard
644 The function tries to load \p toGuard and to store it
645 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
647 The function is useful for intrusive containers when \p toGuard is a node pointer
648 that should be converted to a pointer to the value type before guarding.
649 The parameter \p f of type Func is a functor that makes this conversion:
652 value_type * operator()( T * p );
655 Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
657 template <typename T, class Func>
658 T * protect(size_t nIndex, atomic_ref<T> const& toGuard, Func f )
662 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
663 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
668 /// Protects a atomic marked reference \p link in slot \p nIndex
670 Returns current value of \p link.
672 The function tries to load \p link and to store it
673 to the slot \p nIndex repeatedly until the guard's value equals \p link
675 The function is useful for intrusive containers when \p link is a node pointer
676 that should be converted to a pointer to the value type before guarding.
677 The parameter \p f of type Func is a functor that makes this conversion:
680 value_type * operator()( T p );
683 Really, the result of <tt> f( link.load() ) </tt> is assigned to the hazard pointer.
685 template <typename T, typename Func>
686 typename atomic_marked_ptr<T>::marked_ptr protect( size_t nIndex, atomic_marked_ptr<T> const& link, Func f )
688 typename atomic_marked_ptr<T>::marked_ptr p;
690 p = link.load(atomics::memory_order_relaxed);
691 assign( nIndex, f( p ) );
692 } while ( p != link.load(atomics::memory_order_acquire) );
696 /// Store \p to the slot \p nIndex
698 The function equals to a simple assignment, no loop is performed.
700 template <typename T>
701 T * assign( size_t nIndex, T * p )
703 base_class::set(nIndex, p);
707 /// Store marked pointer \p p to the guard
709 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
710 Can be used for a marked pointer that cannot be changed concurrently.
712 template <typename T, int Bitmask>
713 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
715 return base_class::set( nIndex, p.ptr() );
718 /// Copy guarded value from \p src guard to slot at index \p nIndex
719 void copy( size_t nIndex, Guard const& src )
721 assign( nIndex, src.get_native() );
724 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
725 void copy( size_t nDestIndex, size_t nSrcIndex )
727 assign( nDestIndex, get_native( nSrcIndex ));
730 /// Clear value of the slot \p nIndex
731 void clear( size_t nIndex)
733 base_class::clear( nIndex );
736 /// Get current value of slot \p nIndex
737 template <typename T>
738 T * get( size_t nIndex) const
740 return static_cast<T *>( get_native( nIndex ) );
743 /// Get native hazard pointer stored
744 guarded_pointer get_native( size_t nIndex ) const
746 return base_class::operator[](nIndex).get();
749 /// Capacity of the guard array
750 static CDS_CONSTEXPR size_t capacity()
757 /// Initializes hrc::GarbageCollector singleton
759 The constructor calls hrc::GarbageCollector::Construct with passed parameters.
760 See hrc::GarbageCollector::Construct for explanation of parameters meaning.
763 size_t nHazardPtrCount = 0, ///< number of hazard pointers
764 size_t nMaxThreadCount = 0, ///< max threads count
765 size_t nMaxNodeLinkCount = 0, ///< max number of links a @ref hrc::ContainerNode can contain
766 size_t nMaxTransientLinks = 0 ///< max number of links in live nodes that may transiently point to a deleted node
769 hrc::GarbageCollector::Construct(
777 /// Terminates hrc::GarbageCollector singleton
779 The destructor calls \code hrc::GarbageCollector::Destruct() \endcode
783 hrc::GarbageCollector::Destruct();
786 /// Checks if count of hazard pointer is no less than \p nCountNeeded
788 If \p bRaiseException is \p true (that is the default), the function raises an exception gc::too_few_hazard_pointers
789 if \p nCountNeeded is more than the count of hazard pointer per thread.
791 static bool check_available_guards( size_t nCountNeeded, bool bRaiseException = true )
793 if ( hrc::GarbageCollector::instance().getHazardPointerCount() < nCountNeeded ) {
794 if ( bRaiseException )
795 throw cds::gc::too_few_hazard_pointers();
801 /// Retire pointer \p p with function \p pFunc
803 The function places pointer \p p to array of pointers ready for removing.
804 (so called retired pointer array). The pointer can be safely removed when no guarded pointer points to it.
805 Deleting the pointer is the function \p pFunc call.
807 template <typename T>
808 static void retire( T * p, void (* pFunc)(T *) ) ; // inline in hrc_impl.h
810 /// Retire pointer \p p with functor of type \p Disposer
812 The function places pointer \p p to array of pointers ready for removing.
813 (so called retired pointer array). The pointer can be safely removed when no guard points to it.
815 See gc::HP::retire for \p Disposer requirements.
817 template <class Disposer, typename T>
818 static void retire( T * p ) ; // inline in hrc_impl.h
820 /// Checks if HRC GC is constructed and may be used
823 return hrc::GarbageCollector::isUsed();
826 /// Forced GC cycle call for current thread
828 Usually, this function should not be called directly.
830 static void scan() ; // inline in hrc_impl.h
832 /// Synonym for \ref scan()
833 static void force_dispose()
838 }} // namespace cds::gc
840 #endif // #ifndef __CDS_GC_HRC_DECL_H