3 #ifndef __CDS_GC_HRC_DECL_H
4 #define __CDS_GC_HRC_DECL_H
6 #include <cds/gc/hrc/hrc.h>
7 #include <cds/details/marked_ptr.h>
9 namespace cds { namespace gc {
11 /// Gidenstam's garbage collector
12 /** @ingroup cds_garbage_collector
13 @headerfile cds/gc/hrc.h
15 This class is a wrapper for Gidenstam's memory reclamation schema (HRC - Hazard pointer + Reference Counting)
16 internal implementation.
19 - [2006] A.Gidenstam "Algorithms for synchronization and consistency
20 in concurrent system services", Chapter 5 "Lock-Free Memory Reclamation"
21 Thesis for the degree of Doctor of Philosophy
22 - [2005] Anders Gidenstam, Marina Papatriantafilou and Philippas Tsigas "Allocating
23 memory in a lock-free manner", Proceedings of the 13th Annual European
24 Symposium on Algorithms (ESA 2005), Lecture Notes in Computer
25 Science Vol. 3669, pages 229 - 242, Springer-Verlag, 2005
27 Note that the HRC schema does not support cyclic references, which significantly limits the applicability of this GC.
30 In your \p main function you declare an object of class cds::gc::HRC. This declaration
31 initializes internal hrc::GarbageCollector singleton.
33 #include <cds/init.h> // for cds::Initialize and cds::Terminate
34 #include <cds/gc/hrc.h>
36 int main(int argc, char** argv)
42 // Initialize HRC singleton
54 Each thread that uses cds::gc::HRC-based containers must be attached to the HRC
55 singleton. To attach a thread you should declare an object of class HRC::thread_gc:
57 #include <cds/gc/hrc.h>
59 int myThreadEntryPoint()
61 // Attach the thread to HRC singleton
62 cds::gc::HRC::thread_gc myThreadGC();
67 // The destructor of myThreadGC object detaches the thread from HRC singleton
71 In some cases, your code must work in an external thread. For example, your application
72 is a plug-in for a server that calls your code in threads that have been created by the server.
73 In this case, you may use persistent mode of HRC::thread_gc. In this mode, the thread attaches
74 to the HRC singleton only if it is not yet attached and never call detaching:
76 #include <cds/gc/hrc.h>
78 int myThreadEntryPoint()
80 // Attach the thread in persistent mode
81 cds::gc::HRC::thread_gc myThreadGC( true );
86 // The destructor of myThreadGC object does NOT detach the thread from HRC singleton
95 /// Thread GC implementation for internal usage
96 typedef hrc::ThreadGC thread_gc_impl;
98 /// Wrapper for hrc::ThreadGC class
100 @headerfile cds/gc/hrc.h
101 This class performs automatically attaching/detaching Gidenstam's GC
102 for the current thread.
104 class thread_gc: public thread_gc_impl
112 The constructor attaches the current thread to the Gidenstam's GC
113 if it is not yet attached.
114 The \p bPersistent parameter specifies attachment persistence:
115 - \p true - the class destructor will not detach the thread from Gidenstam's GC.
116 - \p false (default) - the class destructor will detach the thread from Gidenstam's GC.
119 bool bPersistent = false
120 ) ; // inline in hrc_impl.h
124 If the object has been created in persistent mode, the destructor does nothing.
125 Otherwise it detaches the current thread from HRC GC.
127 ~thread_gc() ; // inline in hrc_impl.h
130 ///@anchor hrc_gc_HRC_container_node Base for container node
131 typedef hrc::ContainerNode container_node;
133 /// Native hazard pointer type
134 typedef container_node * guarded_pointer;
138 @headerfile cds/gc/hrc.h
140 template <typename T>
141 class atomic_ref: protected atomics::atomic<T *>
144 typedef atomics::atomic<T *> base_class;
148 atomic_ref() = default;
149 explicit CDS_CONSTEXPR atomic_ref(T * p) CDS_NOEXCEPT
154 /// Read reference value
155 T * load( atomics::memory_order order ) const CDS_NOEXCEPT
157 return base_class::load( order );
160 T * load( atomics::memory_order order ) const volatile CDS_NOEXCEPT
162 return base_class::load( order );
166 /// Store new value to reference
167 void store( T * pNew, atomics::memory_order order ) CDS_NOEXCEPT
169 before_store( pNew );
170 T * pOld = base_class::exchange( pNew, order );
171 after_store( pOld, pNew );
174 void store( T * pNew, atomics::memory_order order ) volatile CDS_NOEXCEPT
176 before_store( pNew );
177 T * pOld = base_class::exchange( pNew, order );
178 after_store( pOld, pNew );
182 /// Updates atomic reference from current value \p pOld to new value \p pNew (strong CAS)
184 May be used when concurrent updates are possible
186 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
188 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
191 bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
192 after_cas( bSuccess, pOld, pNew );
196 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
199 bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
200 after_cas( bSuccess, pOld, pNew );
203 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
205 return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
207 bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
209 return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
213 /// Updates atomic reference from current value \p pOld to new value \p pNew (weak CAS)
215 May be used when concurrent updates are possible
217 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
219 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
222 bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
223 after_cas( bSuccess, pOld, pNew );
227 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
230 bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
231 after_cas( bSuccess, pOld, pNew );
234 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
236 return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
238 bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
240 return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
246 static void before_store( T * pNew ) CDS_NOEXCEPT
251 static void after_store( T * pOld, T * pNew ) CDS_NOEXCEPT
254 pNew->m_bTrace.store( false, atomics::memory_order_release );
258 static void before_cas( T * p ) CDS_NOEXCEPT
262 p->m_bTrace.store( false, atomics::memory_order_release );
265 static void after_cas( bool bSuccess, T * pOld, T * pNew ) CDS_NOEXCEPT
279 /// Atomic marked pointer
281 @headerfile cds/gc/hrc.h
283 template <typename MarkedPtr>
284 class atomic_marked_ptr
287 atomics::atomic< MarkedPtr > m_a;
290 /// Marked pointer type
291 typedef MarkedPtr marked_ptr;
294 atomic_marked_ptr() CDS_NOEXCEPT
295 : m_a( marked_ptr() )
298 explicit CDS_CONSTEXPR atomic_marked_ptr( typename marked_ptr::value_type * p ) CDS_NOEXCEPT
299 : m_a( marked_ptr(p) )
302 atomic_marked_ptr( typename marked_ptr::value_type * ptr, int nMask ) CDS_NOEXCEPT
303 : m_a( marked_ptr(ptr, nMask) )
306 explicit atomic_marked_ptr( marked_ptr const& ptr ) CDS_NOEXCEPT
312 /// Read reference value
313 marked_ptr load(atomics::memory_order order) const CDS_NOEXCEPT
315 return m_a.load(order);
318 /// Store new value to reference
319 void store( marked_ptr pNew, atomics::memory_order order ) CDS_NOEXCEPT
321 before_store( pNew.ptr() );
322 marked_ptr pOld = m_a.exchange( pNew, order );
323 after_store( pOld.ptr(), pNew.ptr() );
326 /// Store new value to reference
327 void store( typename marked_ptr::pointer_type pNew, atomics::memory_order order ) CDS_NOEXCEPT
329 before_store( pNew );
330 marked_ptr pOld = m_a.exchange( marked_ptr(pNew), order );
331 after_store( pOld.ptr(), pNew );
334 /// Updates atomic reference from current value \p pOld to new value \p pNew (weak CAS)
336 May be used when concurrent updates are possible
338 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
340 bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
342 before_cas( pNew.ptr() );
343 bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
344 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
348 bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
350 before_cas( pNew.ptr() );
351 bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success );
352 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
357 /// Updates atomic reference from current value \p pOld to new value \p pNew (strong CAS)
359 May be used when concurrent updates are possible
361 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
363 bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
366 before_cas( pNew.ptr() );
367 bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
368 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
372 bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
374 before_cas( pNew.ptr() );
375 bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success );
376 after_cas( bSuccess, pOld.ptr(), pNew.ptr() );
383 static void before_store( typename marked_ptr::pointer_type p ) CDS_NOEXCEPT
388 static void after_store( typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
391 pNew->m_bTrace.store( false, atomics::memory_order_release );
395 static void before_cas( typename marked_ptr::pointer_type p ) CDS_NOEXCEPT
399 p->m_bTrace.store( false, atomics::memory_order_release );
402 static void after_cas( bool bSuccess, typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
418 @headerfile cds/gc/hrc.h
419 This class is a wrapper for hrc::AutoHPGuard.
421 class Guard: public hrc::AutoHPGuard
424 typedef hrc::AutoHPGuard base_class;
428 /// Default constructor
429 Guard() ; // inline in hrc_impl.h
431 /// Protects atomic pointer
433 Return the value of \p toGuard
435 The function tries to load \p toGuard and to store it
436 to the HP slot repeatedly until the guard's value equals \p toGuard
438 template <typename T>
439 T * protect( atomic_ref<T> const& toGuard )
441 T * pCur = toGuard.load(atomics::memory_order_relaxed);
444 pRet = assign( pCur );
445 pCur = toGuard.load(atomics::memory_order_acquire);
446 } while ( pRet != pCur );
450 /// Protects a converted pointer of type \p atomic<T*>
452 Return the value of \p toGuard
454 The function tries to load \p toGuard and to store result of \p f functor
455 to the HP slot repeatedly until the guard's value equals \p toGuard.
457 The function is useful for intrusive containers when \p toGuard is a node pointer
458 that should be converted to a pointer to the value type before guarding.
459 The parameter \p f of type Func is a functor that makes this conversion:
462 value_type * operator()( T * p );
465 Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
467 template <typename T, class Func>
468 T * protect( atomic_ref<T> const& toGuard, Func f )
470 T * pCur = toGuard.load(atomics::memory_order_relaxed);
475 pCur = toGuard.load(atomics::memory_order_acquire);
476 } while ( pRet != pCur );
480 /// Protects a atomic marked reference \p link
482 Returns current value of \p link.
484 The function tries to load \p link and to store it
485 to the guard repeatedly until the guard's value equals \p link
487 template <typename T>
488 typename atomic_marked_ptr<T>::marked_ptr protect( atomic_marked_ptr<T> const& link )
490 typename atomic_marked_ptr<T>::marked_ptr p;
492 assign( ( p = link.load(atomics::memory_order_relaxed)).ptr() );
493 } while ( p != link.load(atomics::memory_order_acquire) );
497 /// Protects a atomic marked reference \p link
499 Returns current value of \p link.
501 The function tries to load \p link and to store it
502 to the guard repeatedly until the guard's value equals \p link
504 The function is useful for intrusive containers when \p link is a node pointer
505 that should be converted to a pointer to the value type before guarding.
506 The parameter \p f of type Func is a functor that makes this conversion:
509 value_type * operator()( T p );
512 Really, the result of <tt> f( link.load() ) </tt> is assigned to the hazard pointer.
514 template <typename T, typename Func>
515 typename atomic_marked_ptr<T>::marked_ptr protect( atomic_marked_ptr<T> const& link, Func f )
517 typename atomic_marked_ptr<T>::marked_ptr pCur;
519 pCur = link.load(atomics::memory_order_relaxed);
521 } while ( pCur != link.load(atomics::memory_order_acquire) );
525 /// Stores \p p to the guard
527 The function equals to a simple assignment, no loop is performed.
528 Can be used for a pointer that cannot be changed concurrently.
530 template <typename T>
533 return base_class::operator =(p);
536 /// Stores marked pointer \p p to the guard
538 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
539 Can be used for a marked pointer that cannot be changed concurrently.
541 template <typename T, int Bitmask>
542 T * assign( cds::details::marked_ptr<T, Bitmask> p )
544 return base_class::operator =( p.ptr() );
547 /// Copy from \p src guard to \p this guard
548 void copy( Guard const& src )
550 assign( src.get_native() );
553 /// Clear value of the guard
559 /// Get the value currently protected
560 template <typename T>
563 return static_cast<T *>( get_native());
566 /// Get native hazard pointer stored
567 guarded_pointer get_native() const
569 return base_class::get();
575 @headerfile cds/gc/hrc.h
576 This class is a wrapper for AutoHPArray template.
577 Template parameter \p Limit defines the size of HP array.
579 template <size_t Limit>
580 class GuardArray: public hrc::AutoHPArray<Limit>
583 typedef hrc::AutoHPArray<Limit> base_class;
586 /// Rebind array for other size \p OtherLimit
587 template <size_t OtherLimit>
589 typedef GuardArray<OtherLimit> other ; ///< rebinding result
594 GuardArray() ; // inline in hrc_impl.h
595 GuardArray( thread_gc_impl& threadGC )
596 : base_class( threadGC )
600 /// Protects an atomic reference \p link in slot \p nIndex
602 Returns current value of \p link.
604 The function tries to load \p pToGuard and to store it
605 to the slot \p nIndex repeatedly until the guard's value equals \p pToGuard
607 template <typename T>
608 T * protect( size_t nIndex, atomic_ref<T> const& link )
612 p = assign( nIndex, link.load(atomics::memory_order_relaxed) );
613 } while ( p != link.load(atomics::memory_order_acquire) );
617 /// Protects a atomic marked reference \p link in slot \p nIndex
619 Returns current value of \p link.
621 The function tries to load \p link and to store it
622 to the slot \p nIndex repeatedly until the guard's value equals \p link
624 template <typename T>
625 typename atomic_marked_ptr<T>::marked_ptr protect( size_t nIndex, atomic_marked_ptr<T> const& link )
627 typename atomic_marked_ptr<T>::marked_ptr p;
629 assign( nIndex, ( p = link.load(atomics::memory_order_relaxed)).ptr() );
630 } while ( p != link.load(atomics::memory_order_acquire) );
634 /// Protects a pointer of type \p atomic<T*>
636 Return the value of \p toGuard
638 The function tries to load \p toGuard and to store it
639 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
641 The function is useful for intrusive containers when \p toGuard is a node pointer
642 that should be converted to a pointer to the value type before guarding.
643 The parameter \p f of type Func is a functor that makes this conversion:
646 value_type * operator()( T * p );
649 Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
651 template <typename T, class Func>
652 T * protect(size_t nIndex, atomic_ref<T> const& toGuard, Func f )
656 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
657 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
662 /// Protects a atomic marked reference \p link in slot \p nIndex
664 Returns current value of \p link.
666 The function tries to load \p link and to store it
667 to the slot \p nIndex repeatedly until the guard's value equals \p link
669 The function is useful for intrusive containers when \p link is a node pointer
670 that should be converted to a pointer to the value type before guarding.
671 The parameter \p f of type Func is a functor that makes this conversion:
674 value_type * operator()( T p );
677 Really, the result of <tt> f( link.load() ) </tt> is assigned to the hazard pointer.
679 template <typename T, typename Func>
680 typename atomic_marked_ptr<T>::marked_ptr protect( size_t nIndex, atomic_marked_ptr<T> const& link, Func f )
682 typename atomic_marked_ptr<T>::marked_ptr p;
684 p = link.load(atomics::memory_order_relaxed);
685 assign( nIndex, f( p ) );
686 } while ( p != link.load(atomics::memory_order_acquire) );
690 /// Store \p to the slot \p nIndex
692 The function equals to a simple assignment, no loop is performed.
694 template <typename T>
695 T * assign( size_t nIndex, T * p )
697 base_class::set(nIndex, p);
701 /// Store marked pointer \p p to the guard
703 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
704 Can be used for a marked pointer that cannot be changed concurrently.
706 template <typename T, int Bitmask>
707 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
709 return base_class::set( nIndex, p.ptr() );
712 /// Copy guarded value from \p src guard to slot at index \p nIndex
713 void copy( size_t nIndex, Guard const& src )
715 assign( nIndex, src.get_native() );
718 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
719 void copy( size_t nDestIndex, size_t nSrcIndex )
721 assign( nDestIndex, get_native( nSrcIndex ));
724 /// Clear value of the slot \p nIndex
725 void clear( size_t nIndex)
727 base_class::clear( nIndex );
730 /// Get current value of slot \p nIndex
731 template <typename T>
732 T * get( size_t nIndex) const
734 return static_cast<T *>( get_native( nIndex ) );
737 /// Get native hazard pointer stored
738 guarded_pointer get_native( size_t nIndex ) const
740 return base_class::operator[](nIndex).get();
743 /// Capacity of the guard array
744 static CDS_CONSTEXPR size_t capacity()
751 /// Initializes hrc::GarbageCollector singleton
753 The constructor calls hrc::GarbageCollector::Construct with passed parameters.
754 See hrc::GarbageCollector::Construct for explanation of parameters meaning.
757 size_t nHazardPtrCount = 0, ///< number of hazard pointers
758 size_t nMaxThreadCount = 0, ///< max threads count
759 size_t nMaxNodeLinkCount = 0, ///< max number of links a @ref hrc::ContainerNode can contain
760 size_t nMaxTransientLinks = 0 ///< max number of links in live nodes that may transiently point to a deleted node
763 hrc::GarbageCollector::Construct(
771 /// Terminates hrc::GarbageCollector singleton
773 The destructor calls \code hrc::GarbageCollector::Destruct() \endcode
777 hrc::GarbageCollector::Destruct();
780 /// Checks if count of hazard pointer is no less than \p nCountNeeded
782 If \p bRaiseException is \p true (that is the default), the function raises an exception gc::too_few_hazard_pointers
783 if \p nCountNeeded is more than the count of hazard pointer per thread.
785 static bool check_available_guards( size_t nCountNeeded, bool bRaiseException = true )
787 if ( hrc::GarbageCollector::instance().getHazardPointerCount() < nCountNeeded ) {
788 if ( bRaiseException )
789 throw cds::gc::too_few_hazard_pointers();
/// Retire pointer \p p with function \p pFunc
/**
    The function places pointer \p p to array of pointers ready for removing
    (so called retired pointer array). The pointer can be safely removed when
    no guarded pointer points to it.
    Deleting the pointer is the function \p pFunc call.
*/
template <typename T>
static void retire( T * p, void (* pFunc)(T *) ) ;  // inline in hrc_impl.h
/// Retire pointer \p p with functor of type \p Disposer
/**
    The function places pointer \p p to array of pointers ready for removing
    (so called retired pointer array). The pointer can be safely removed when
    no guard points to it.

    See gc::HP::retire for \p Disposer requirements.
*/
template <class Disposer, typename T>
static void retire( T * p ) ;   // inline in hrc_impl.h
814 /// Checks if HRC GC is constructed and may be used
817 return hrc::GarbageCollector::isUsed();
/// Forced GC cycle call for current thread
/**
    Usually, this function should not be called directly.
*/
static void scan() ;    // inline in hrc_impl.h
826 /// Synonym for \ref scan()
827 static void force_dispose()
832 }} // namespace cds::gc
834 #endif // #ifndef __CDS_GC_HRC_DECL_H