/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/intrusive/free_list_selector.h>
37 #include <cds/details/throw_exception.h>
38 #include <cds/details/static_functor.h>
39 #include <cds/details/marked_ptr.h>
40 #include <cds/user_setup/cache_line.h>
42 namespace cds { namespace gc {
44 using namespace cds::gc::hp::common;
46 /// Exception "Dynamic Hazard Pointer SMR is not initialized"
47 class not_initialized: public std::runtime_error
51 : std::runtime_error( "Global DHP SMR object is not initialized" )
55 struct guard_block: public cds::intrusive::FreeListImpl::node
57 guard_block* next_; // next block in the thread list
65 return reinterpret_cast<guard*>( this + 1 );
69 /// \p guard_block allocator (global object)
74 static hp_allocator& instance();
76 CDS_EXPORT_API guard_block* alloc();
// Returns \p block to the global lock-free free list for later reuse by any thread.
// NOTE(review): surrounding brace lines appear to be elided in this fragment — confirm against repository.
77 void free( guard_block* block )
79 free_list_.put( block );
85 CDS_EXPORT_API ~hp_allocator();
88 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
91 /// Per-thread hazard pointer storage
92 class thread_hp_storage
96 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
98 , extended_list_( nullptr )
100 , initial_capacity_( nSize )
103 thread_hp_storage() = delete;
104 thread_hp_storage( thread_hp_storage const& ) = delete;
105 thread_hp_storage( thread_hp_storage&& ) = delete;
114 if ( cds_unlikely( free_head_ == nullptr )) {
116 assert( free_head_ != nullptr );
119 guard* g = free_head_;
120 free_head_ = g->next_;
124 void free( guard* g ) CDS_NOEXCEPT
128 g->next_ = free_head_;
133 template< size_t Capacity>
134 size_t alloc( guard_array<Capacity>& arr )
136 for ( size_t i = 0; i < Capacity; ++i ) {
137 if ( cds_unlikely( free_head_ == nullptr ))
139 arr.reset( i, free_head_ );
140 free_head_ = free_head_->next_;
145 template <size_t Capacity>
146 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
148 guard* gList = free_head_;
149 for ( size_t i = 0; i < Capacity; ++i ) {
163 for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
166 // free all extended blocks
167 hp_allocator& alloc = hp_allocator::instance();
168 for ( guard_block* p = extended_list_; p; ) {
169 guard_block* next = p->next_;
174 extended_list_ = nullptr;
179 assert( extended_list_ == nullptr );
182 for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
191 assert( free_head_ == nullptr );
193 guard_block* block = hp_allocator::instance().alloc();
194 block->next_ = extended_list_;
195 extended_list_ = block;
196 free_head_ = block->first();
200 guard* free_head_; ///< Head of free guard list
201 guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
202 guard* const array_; ///< initial HP array
203 size_t const initial_capacity_; ///< Capacity of \p array_
206 struct retired_block: public cds::intrusive::FreeListImpl::node
208 retired_block* next_; ///< Next block in thread-private retired array
210 static size_t const c_capacity = 256;
216 retired_ptr* first() const
218 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
221 retired_ptr* last() const
223 return first() + c_capacity;
227 class retired_allocator
231 static retired_allocator& instance();
233 CDS_EXPORT_API retired_block* alloc();
// Returns \p block to the global free list; next_ is cleared first so a later
// consumer sees a detached block (see the next_ == nullptr assertions at alloc sites).
// NOTE(review): surrounding brace lines appear to be elided in this fragment — confirm against repository.
234 void free( retired_block* block )
236 block->next_ = nullptr;
237 free_list_.put( block );
243 CDS_EXPORT_API ~retired_allocator();
246 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
249 /// Per-thread retired array
254 retired_array() CDS_NOEXCEPT
255 : current_block_( nullptr )
256 , current_cell_( nullptr )
257 , list_head_( nullptr )
258 , list_tail_( nullptr )
262 retired_array( retired_array const& ) = delete;
263 retired_array( retired_array&& ) = delete;
271 bool push( retired_ptr const& p ) CDS_NOEXCEPT
273 assert( current_block_ != nullptr );
274 assert( current_block_->first() <= current_cell_ );
275 assert( current_cell_ < current_block_->last() );
276 //assert( &p != current_cell_ );
279 if ( ++current_cell_ == current_block_->last() ) {
280 // goto next block if exists
281 if ( current_block_->next_ ) {
282 current_block_ = current_block_->next_;
283 current_cell_ = current_block_->first();
288 // smr::scan() extend retired_array if needed
295 bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
297 bool ret = push( *p );
302 private: // called by smr
305 if ( list_head_ == nullptr ) {
306 retired_block* block = retired_allocator::instance().alloc();
307 assert( block->next_ == nullptr );
312 current_cell_ = block->first();
320 retired_allocator& alloc = retired_allocator::instance();
321 for ( retired_block* p = list_head_; p; ) {
322 retired_block* next = p->next_;
329 list_tail_ = nullptr;
330 current_cell_ = nullptr;
337 assert( list_head_ != nullptr );
338 assert( current_block_ == list_tail_ );
339 assert( current_cell_ == current_block_->last() );
341 retired_block* block = retired_allocator::instance().alloc();
342 assert( block->next_ == nullptr );
344 list_tail_ = list_tail_->next_ = block;
345 current_cell_ = block->first();
351 return current_block_ == nullptr
352 || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
356 retired_block* current_block_;
357 retired_ptr* current_cell_; // in current_block_
359 retired_block* list_head_;
360 retired_block* list_tail_;
366 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
367 retired_array retired_; ///< Retired data private to the thread
369 char pad1_[cds::c_nCacheLineSize];
370 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
371 char pad2_[cds::c_nCacheLineSize];
373 // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
374 // cppcheck-suppress uninitMemberVar
375 thread_data( guard* guards, size_t guard_count )
376 : hazards_( guards, guard_count )
380 thread_data() = delete;
381 thread_data( thread_data const& ) = delete;
382 thread_data( thread_data&& ) = delete;
386 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
390 // Hazard Pointer SMR (Safe Memory Reclamation)
393 struct thread_record;
396 /// Returns the instance of Hazard Pointer \ref smr
397 static smr& instance()
399 # ifdef CDS_DISABLE_SMR_EXCEPTION
400 assert( instance_ != nullptr );
403 CDS_THROW_EXCEPTION( not_initialized() );
408 /// Creates Dynamic Hazard Pointer SMR singleton
410 Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
411 Otherwise it does nothing.
413 The Michael's HP reclamation schema depends of three parameters:
414 - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from
415 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
416 the function uses maximum of HP count for CDS library
417 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
418 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
419 <tt> nHazardPtrCount * nMaxThreadCount </tt>
420 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
422 static CDS_EXPORT_API void construct(
423 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
// for backward compatibility
428 static void Construct(
429 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
432 construct( nInitialHazardPtrCount );
436 /// Destroys global instance of \ref smr
438 The parameter \p bDetachAll should be used carefully: if its value is \p true,
439 then the object destroyed automatically detaches all attached threads. This feature
440 can be useful when you have no control over the thread termination, for example,
441 when \p libcds is injected into existing external thread.
443 static CDS_EXPORT_API void destruct(
444 bool bDetachAll = false ///< Detach all threads
// for backward compatibility
449 static void Destruct(
450 bool bDetachAll = false ///< Detach all threads
453 destruct( bDetachAll );
457 /// Checks if global SMR object is constructed and may be used
458 static bool isUsed() CDS_NOEXCEPT
460 return instance_ != nullptr;
463 /// Set memory management functions
465 @note This function may be called <b>BEFORE</b> creating an instance
466 of Dynamic Hazard Pointer SMR
468 SMR object allocates some memory for thread-specific data and for
470 By default, a standard \p new and \p delete operators are used for this.
472 static CDS_EXPORT_API void set_memory_allocator(
473 void* ( *alloc_func )( size_t size ),
474 void( *free_func )( void * p )
477 /// Returns thread-local data for the current thread
478 static CDS_EXPORT_API thread_data* tls();
480 static CDS_EXPORT_API void attach_thread();
481 static CDS_EXPORT_API void detach_thread();
483 public: // for internal use only
484 /// The main garbage collecting function
485 CDS_EXPORT_API void scan( thread_data* pRec );
487 /// Helper scan routine
489 The function guarantees that every node that is eligible for reuse is eventually freed, barring
490 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
491 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
492 to thread's list of reclaimed pointers.
494 The function is called internally by \p scan().
496 CDS_EXPORT_API void help_scan( thread_data* pThis );
498 hp_allocator& get_hp_allocator()
500 return hp_allocator_;
503 retired_allocator& get_retired_allocator()
505 return retired_allocator_;
509 CDS_EXPORT_API explicit smr(
510 size_t nInitialHazardPtrCount
513 CDS_EXPORT_API ~smr();
515 CDS_EXPORT_API void detach_all_thread();
519 CDS_EXPORT_API thread_record* create_thread_data();
520 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
522 /// Allocates Hazard Pointer SMR thread private data
523 CDS_EXPORT_API thread_record* alloc_thread_data();
525 /// Free HP SMR thread-private data
526 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
530 static CDS_EXPORT_API smr* instance_;
532 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
533 size_t const initial_hazard_count_; ///< initial number of hazard pointers per thread
534 hp_allocator hp_allocator_;
535 retired_allocator retired_allocator_;
538 std::atomic<size_t> last_plist_size_; ///< HP array size in last scan() call
541 // for backward compatibility
542 typedef smr GarbageCollector;
// Returns the global \p hp_allocator owned by the \p smr singleton.
// Defined out-of-line because it needs the complete \p smr type.
546 inline hp_allocator& hp_allocator::instance()
548 return smr::instance().get_hp_allocator();
// Returns the global \p retired_allocator owned by the \p smr singleton.
// Defined out-of-line because it needs the complete \p smr type.
551 inline retired_allocator& retired_allocator::instance()
553 return smr::instance().get_retired_allocator();
/// Dynamic Hazard Pointer garbage collector
/** @ingroup cds_garbage_collector
    @headerfile cds/gc/dhp.h

    Implementation of the Dynamic Hazard Pointer garbage collector.

    - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
    - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
    - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"

    Dynamic Hazard Pointer SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
    in contrast to the classic Hazard Pointer SMR in which the count of hazard pointers per thread is limited.

    See \ref cds_how_to_use "How to use" section for details how to apply garbage collector.
578 /// Native guarded pointer type
579 typedef void* guarded_pointer;
582 template <typename T> using atomic_ref = atomics::atomic<T *>;
586 @headerfile cds/gc/dhp.h
588 template <typename T> using atomic_type = atomics::atomic<T>;
590 /// Atomic marked pointer
591 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
594 /// Dynamic Hazard Pointer guard
596 A guard is a hazard pointer.
597 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
599 \p %Guard object is movable but not copyable.
601 The guard object can be in two states:
602 - unlinked - the guard is not linked with any internal hazard pointer.
603 In this state no operation except \p link() and move assignment is supported.
604 - linked (default) - the guard allocates an internal hazard pointer and fully operable.
606 Due to performance reason the implementation does not check state of the guard in runtime.
608 @warning Move assignment can transfer the guard in unlinked state, use with care.
613 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
615 : guard_( dhp::smr::tls()->hazards_.alloc() )
/// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
619 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
623 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
624 Guard( Guard&& src ) CDS_NOEXCEPT
625 : guard_( src.guard_ )
627 src.guard_ = nullptr;
630 /// Move assignment: the internal guards are swapped between \p src and \p this
632 @warning \p src will become in unlinked state if \p this was unlinked on entry.
634 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
636 std::swap( guard_, src.guard_ );
640 /// Copy ctor is prohibited - the guard is not copyable
641 Guard( Guard const& ) = delete;
643 /// Copy assignment is prohibited
644 Guard& operator=( Guard const& ) = delete;
646 /// Frees the internal hazard pointer if the guard is in linked state
652 /// Checks if the guard object linked with any internal hazard pointer
653 bool is_linked() const
655 return guard_ != nullptr;
658 /// Links the guard with internal hazard pointer if the guard is in unlinked state
662 guard_ = dhp::smr::tls()->hazards_.alloc();
665 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
669 dhp::smr::tls()->hazards_.free( guard_ );
674 /// Protects a pointer of type <tt> atomic<T*> </tt>
676 Return the value of \p toGuard
678 The function tries to load \p toGuard and to store it
679 to the HP slot repeatedly until the guard's value equals \p toGuard
681 template <typename T>
682 T protect( atomics::atomic<T> const& toGuard )
684 assert( guard_ != nullptr );
686 T pCur = toGuard.load(atomics::memory_order_acquire);
689 pRet = assign( pCur );
690 pCur = toGuard.load(atomics::memory_order_acquire);
691 } while ( pRet != pCur );
695 /// Protects a converted pointer of type <tt> atomic<T*> </tt>
697 Return the value of \p toGuard
699 The function tries to load \p toGuard and to store result of \p f functor
700 to the HP slot repeatedly until the guard's value equals \p toGuard.
702 The function is useful for intrusive containers when \p toGuard is a node pointer
703 that should be converted to a pointer to the value type before guarding.
704 The parameter \p f of type Func is a functor that makes this conversion:
707 value_type * operator()( T * p );
710 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
712 template <typename T, class Func>
713 T protect( atomics::atomic<T> const& toGuard, Func f )
715 assert( guard_ != nullptr );
717 T pCur = toGuard.load(atomics::memory_order_acquire);
722 pCur = toGuard.load(atomics::memory_order_acquire);
723 } while ( pRet != pCur );
727 /// Store \p p to the guard
729 The function is just an assignment, no loop is performed.
730 Can be used for a pointer that cannot be changed concurrently
731 or for already guarded pointer.
733 template <typename T>
736 assert( guard_ != nullptr );
739 dhp::smr::tls()->sync();
744 std::nullptr_t assign( std::nullptr_t )
746 assert( guard_ != nullptr );
753 /// Store marked pointer \p p to the guard
755 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
756 Can be used for a marked pointer that cannot be changed concurrently
757 or for already guarded pointer.
759 template <typename T, int BITMASK>
760 T* assign( cds::details::marked_ptr<T, BITMASK> p )
762 return assign( p.ptr());
765 /// Copy from \p src guard to \p this guard
766 void copy( Guard const& src )
768 assign( src.get_native());
771 /// Clears value of the guard
774 assert( guard_ != nullptr );
779 /// Gets the value currently protected (relaxed read)
780 template <typename T>
783 assert( guard_ != nullptr );
784 return guard_->get_as<T>();
787 /// Gets native guarded pointer stored
788 void* get_native() const
790 assert( guard_ != nullptr );
791 return guard_->get();
795 dhp::guard* release()
797 dhp::guard* g = guard_;
802 dhp::guard*& guard_ref()
814 /// Array of Dynamic Hazard Pointer guards
816 The class is intended for allocating an array of hazard pointer guards.
817 Template parameter \p Count defines the size of the array.
819 A \p %GuardArray object is not copy- and move-constructible
820 and not copy- and move-assignable.
822 template <size_t Count>
826 /// Rebind array for other size \p OtherCount
827 template <size_t OtherCount>
829 typedef GuardArray<OtherCount> other ; ///< rebinding result
833 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
836 /// Default ctor allocates \p Count hazard pointers
839 dhp::smr::tls()->hazards_.alloc( guards_ );
842 /// Move ctor is prohibited
843 GuardArray( GuardArray&& ) = delete;
845 /// Move assignment is prohibited
846 GuardArray& operator=( GuardArray&& ) = delete;
848 /// Copy ctor is prohibited
849 GuardArray( GuardArray const& ) = delete;
851 /// Copy assignment is prohibited
852 GuardArray& operator=( GuardArray const& ) = delete;
854 /// Frees allocated hazard pointers
857 dhp::smr::tls()->hazards_.free( guards_ );
860 /// Protects a pointer of type \p atomic<T*>
862 Return the value of \p toGuard
864 The function tries to load \p toGuard and to store it
865 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
867 template <typename T>
868 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
870 assert( nIndex < capacity() );
874 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
875 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
880 /// Protects a pointer of type \p atomic<T*>
882 Return the value of \p toGuard
884 The function tries to load \p toGuard and to store it
885 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
887 The function is useful for intrusive containers when \p toGuard is a node pointer
888 that should be converted to a pointer to the value type before guarding.
889 The parameter \p f of type Func is a functor to make that conversion:
892 value_type * operator()( T * p );
895 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
897 template <typename T, class Func>
898 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
900 assert( nIndex < capacity() );
904 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
905 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
910 /// Store \p p to the slot \p nIndex
912 The function is just an assignment, no loop is performed.
914 template <typename T>
915 T * assign( size_t nIndex, T * p )
917 assert( nIndex < capacity() );
919 guards_.set( nIndex, p );
920 dhp::smr::tls()->sync();
924 /// Store marked pointer \p p to the guard
926 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
927 Can be used for a marked pointer that cannot be changed concurrently
928 or for already guarded pointer.
930 template <typename T, int Bitmask>
931 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
933 return assign( nIndex, p.ptr());
936 /// Copy guarded value from \p src guard to slot at index \p nIndex
937 void copy( size_t nIndex, Guard const& src )
939 assign( nIndex, src.get_native());
942 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
943 void copy( size_t nDestIndex, size_t nSrcIndex )
945 assign( nDestIndex, get_native( nSrcIndex ));
948 /// Clear value of the slot \p nIndex
949 void clear( size_t nIndex )
951 guards_.clear( nIndex );
954 /// Get current value of slot \p nIndex
955 template <typename T>
956 T * get( size_t nIndex ) const
958 assert( nIndex < capacity() );
959 return guards_[nIndex]->template get_as<T>();
962 /// Get native guarded pointer stored
963 guarded_pointer get_native( size_t nIndex ) const
965 assert( nIndex < capacity() );
966 return guards_[nIndex]->get();
970 dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
972 return guards_.release( nIndex );
976 /// Capacity of the guard array
977 static CDS_CONSTEXPR size_t capacity()
984 dhp::guard_array<c_nCapacity> guards_;
990 A guarded pointer is a pair of a pointer and GC's guard.
991 Usually, it is used for returning a pointer to the item from an lock-free container.
992 The guard prevents the pointer to be early disposed (freed) by GC.
993 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
996 - \p GuardedType - a type which the guard stores
997 - \p ValueType - a value type
998 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1000 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1001 In such case the \p %guarded_ptr is:
1003 typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1006 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1014 struct value_accessor {
1015 std::string* operator()( foo* pFoo ) const
1017 return &(pFoo->value);
1022 typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1025 You don't need use this class directly.
1026 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1028 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1032 struct trivial_cast {
1033 ValueType * operator()( GuardedType * p ) const
1039 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1043 typedef GuardedType guarded_type; ///< Guarded type
1044 typedef ValueType value_type; ///< Value type
1046 /// Functor for casting \p guarded_type to \p value_type
1047 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1050 /// Creates empty guarded pointer
1051 guarded_ptr() CDS_NOEXCEPT
1056 explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1060 /// Initializes guarded pointer with \p p
1061 explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1066 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1072 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1073 : guard_( gp.guard_ )
1075 gp.guard_ = nullptr;
1079 template <typename GT, typename VT, typename C>
1080 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1081 : guard_( gp.guard_ )
1083 gp.guard_ = nullptr;
1086 /// Ctor from \p Guard
1087 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1088 : guard_( g.release())
1091 /// The guarded pointer is not copy-constructible
1092 guarded_ptr( guarded_ptr const& gp ) = delete;
1094 /// Clears the guarded pointer
1096 \ref release is called if guarded pointer is not \ref empty
1098 ~guarded_ptr() CDS_NOEXCEPT
1103 /// Move-assignment operator
1104 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1106 std::swap( guard_, gp.guard_ );
1110 /// Move-assignment from \p Guard
1111 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1113 std::swap( guard_, g.guard_ref());
1117 /// The guarded pointer is not copy-assignable
1118 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1120 /// Returns a pointer to guarded value
1121 value_type * operator ->() const CDS_NOEXCEPT
1124 return value_cast()( guard_->get_as<guarded_type>() );
1127 /// Returns a reference to guarded value
1128 value_type& operator *() CDS_NOEXCEPT
1131 return *value_cast()( guard_->get_as<guarded_type>() );
1134 /// Returns const reference to guarded value
1135 value_type const& operator *() const CDS_NOEXCEPT
1138 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1141 /// Checks if the guarded pointer is \p nullptr
1142 bool empty() const CDS_NOEXCEPT
1144 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1147 /// \p bool operator returns <tt>!empty()</tt>
1148 explicit operator bool() const CDS_NOEXCEPT
1153 /// Clears guarded pointer
1155 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
Dereferencing the guarded pointer after \p release() is dangerous.
1158 void release() CDS_NOEXCEPT
1164 // For internal use only!!!
1165 void reset(guarded_type * p) CDS_NOEXCEPT
1179 guard_ = dhp::smr::tls()->hazards_.alloc();
1185 dhp::smr::tls()->hazards_.free( guard_ );
1198 /// Initializes %DHP memory manager singleton
1200 Constructor creates and initializes %DHP global object.
1201 %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
1202 it is created in the beginning of \p main() function.
1203 After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.
\p nInitialThreadGuardCount - initial count of guards allocated for each thread.
When a thread is initialized, the GC allocates a local guard pool for the thread from a common guard pool.
When needed, the local thread's guard pool is grown automatically from the common pool.
When the thread terminates, its guard pool is returned to the common GC pool.
1211 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
1214 dhp::smr::construct( nInitialHazardPtrCount );
1217 /// Destroys %DHP memory manager
1219 The destructor destroys %DHP global object. After calling of this function you may \b NOT
1220 use CDS data structures based on \p %cds::gc::DHP.
1221 Usually, %DHP object is destroyed at the end of your \p main().
1225 dhp::GarbageCollector::destruct( true );
1228 /// Checks if count of hazard pointer is no less than \p nCountNeeded
1230 The function always returns \p true since the guard count is unlimited for
1231 \p %gc::DHP garbage collector.
1233 static CDS_CONSTEXPR bool check_available_guards(
1234 #ifdef CDS_DOXYGEN_INVOKED
1235 size_t nCountNeeded,
1244 /// Set memory management functions
1246 @note This function may be called <b>BEFORE</b> creating an instance
1247 of Dynamic Hazard Pointer SMR
1249 SMR object allocates some memory for thread-specific data and for
1250 creating SMR object.
1251 By default, a standard \p new and \p delete operators are used for this.
1253 static void set_memory_allocator(
1254 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1255 void( *free_func )( void * p ) ///< \p free() function
1258 dhp::smr::set_memory_allocator( alloc_func, free_func );
1261 /// Retire pointer \p p with function \p pFunc
1263 The function places pointer \p p to array of pointers ready for removing.
1264 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1265 \p func is a disposer: when \p p can be safely removed, \p func is called.
// NOTE(review): brace lines appear elided in this fragment — confirm against repository.
1267 template <typename T>
1268 static void retire( T * p, void (* func)(T *))
// Fetch this thread's private SMR data; push() returns false when the
// thread-local retired array is full, which triggers a reclamation scan.
1270 dhp::thread_data* rec = dhp::smr::tls();
1271 if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
1272 dhp::smr::instance().scan( rec );
1275 /// Retire pointer \p p with functor of type \p Disposer
1277 The function places pointer \p p to array of pointers ready for removing.
1278 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1280 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1282 template <typename T>
1284 void operator()( T * p ) ; // disposing operator
1287 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1288 - it should be stateless functor
1289 - it should be default-constructible
1290 - the result of functor call with argument \p p should not depend on where the functor will be called.
1293 Operator \p delete functor:
1295 template <typename T>
1297 void operator ()( T * p ) {
1302 // How to call HP::retire method
1305 // ... use p in lock-free manner
1307 cds::gc::DHP::retire<disposer>( p ) ; // place p to retired pointer array of DHP SMR
1310 Functor based on \p std::allocator :
1312 template <typename Alloc = std::allocator<int> >
1314 template <typename T>
1315 void operator()( T * p ) {
1316 typedef typename Alloc::templare rebind<T>::other alloc_t;
1319 a.deallocate( p, 1 );
1324 template <class Disposer, typename T>
1325 static void retire( T * p )
1327 if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1331 /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
1332 static bool isUsed()
1334 return dhp::smr::isUsed();
1337 /// Forced GC cycle call for current thread
1339 Usually, this function should not be called directly.
1343 dhp::smr::instance().scan( dhp::smr::tls() );
1346 /// Synonym for \p scan()
1347 static void force_dispose()
1353 }} // namespace cds::gc
1355 #endif // #ifndef CDSLIB_GC_DHP_SMR_H