2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/lib.h>
37 #include <cds/threading/model.h>
38 #include <cds/intrusive/free_list_selector.h>
39 #include <cds/details/throw_exception.h>
40 #include <cds/details/static_functor.h>
41 #include <cds/details/marked_ptr.h>
42 #include <cds/user_setup/cache_line.h>
44 namespace cds { namespace gc {
46 /// Dynamic (adaptive) Hazard Pointer implementation details
48 using namespace cds::gc::hp::common;
50 /// Exception "Dynamic Hazard Pointer SMR is not initialized"
// NOTE(review): reconstructed from the damaged extraction (fused line numbers,
// missing class body); matches the visible base class and message string.
// Thrown by smr::instance() when the global DHP singleton has not been
// constructed yet (see CDS_THROW_EXCEPTION use below).
class not_initialized: public std::runtime_error
{
public:
    not_initialized()
        : std::runtime_error( "Global DHP SMR object is not initialized" )
    {}
};
62 struct guard_block: public cds::intrusive::FreeListImpl::node
64 guard_block* next_; // next block in the thread list
72 return reinterpret_cast<guard*>( this + 1 );
78 /// \p guard_block allocator (global object)
83 static hp_allocator& instance();
85 CDS_EXPORT_API guard_block* alloc();
86 void free( guard_block* block )
88 free_list_.put( block );
94 CDS_EXPORT_API ~hp_allocator();
97 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
102 /// Per-thread hazard pointer storage
// NOTE(review): this span is a damaged extraction -- the original source line
// numbers are fused into each line and many lines (braces, signatures, parts
// of method bodies) are missing. Comments document only what the visible
// lines establish; do not treat this text as compilable as-is.
103 class thread_hp_storage
// Ctor: adopts a pre-allocated raw array `arr` of nSize guard slots and
// placement-new constructs the guards in it (see the `new( arr )` line).
107 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
109 , extended_list_( nullptr )
111 , initial_capacity_( nSize )
114 new( arr ) guard[nSize];
// Not default-constructible, not copyable, not movable.
117 thread_hp_storage() = delete;
118 thread_hp_storage( thread_hp_storage const& ) = delete;
119 thread_hp_storage( thread_hp_storage&& ) = delete;
// Single-guard alloc: when the free list is exhausted it is refilled
// (presumably via extend() below -- the call site is lost), then the head
// of the intrusive free list is popped.
128 if ( cds_unlikely( free_head_ == nullptr )) {
130 assert( free_head_ != nullptr );
133 guard* g = free_head_;
134 free_head_ = g->next_;
// Returns guard g to the head of the thread-local free list.
138 void free( guard* g ) CDS_NOEXCEPT
142 g->next_ = free_head_;
// Bulk alloc: fills a guard_array<Capacity>, popping from the free list and
// refilling it when it runs dry mid-loop.
147 template< size_t Capacity>
148 size_t alloc( guard_array<Capacity>& arr )
150 for ( size_t i = 0; i < Capacity; ++i ) {
151 if ( cds_unlikely( free_head_ == nullptr ))
153 arr.reset( i, free_head_ );
154 free_head_ = free_head_->next_;
// Bulk free: links the array's guards back into the free list.
159 template <size_t Capacity>
160 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
162 guard* gList = free_head_;
163 for ( size_t i = 0; i < Capacity; ++i ) {
// Teardown: clears each guard of the initial array, then returns every
// extended guard_block to the global hp_allocator singleton.
177 for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
180 // free all extended blocks
181 hp_allocator& alloc = hp_allocator::instance();
182 for ( guard_block* p = extended_list_; p; ) {
183 guard_block* next = p->next_;
188 extended_list_ = nullptr;
// Rebuilds the intrusive free list over the initial array; requires that no
// extended blocks are currently held (see the assert).
193 assert( extended_list_ == nullptr );
196 for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
// extend(): grabs a fresh guard_block from the global allocator, links it at
// the head of extended_list_ and exposes its guards through free_head_.
205 assert( free_head_ == nullptr );
207 guard_block* block = hp_allocator::instance().alloc();
208 block->next_ = extended_list_;
209 extended_list_ = block;
210 free_head_ = block->first();
214 guard* free_head_; ///< Head of free guard list
215 guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
216 guard* const array_; ///< initial HP array
217 size_t const initial_capacity_; ///< Capacity of \p array_
222 struct retired_block: public cds::intrusive::FreeListImpl::node
224 retired_block* next_; ///< Next block in thread-private retired array
226 static size_t const c_capacity = 256;
232 retired_ptr* first() const
234 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
237 retired_ptr* last() const
239 return first() + c_capacity;
245 class retired_allocator
249 static retired_allocator& instance();
251 CDS_EXPORT_API retired_block* alloc();
252 void free( retired_block* block )
254 block->next_ = nullptr;
255 free_list_.put( block );
261 CDS_EXPORT_API ~retired_allocator();
264 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
269 /// Per-thread retired array
// NOTE(review): damaged extraction -- the original source line numbers are
// fused into the code and many lines are missing. Comments document only the
// visible logic; do not treat this text as compilable as-is.
274 retired_array() CDS_NOEXCEPT
275 : current_block_( nullptr )
276 , current_cell_( nullptr )
277 , list_head_( nullptr )
278 , list_tail_( nullptr )
282 retired_array( retired_array const& ) = delete;
283 retired_array( retired_array&& ) = delete;
// push(): stores p into the current cell; on block overflow advances to the
// next linked block if one exists, otherwise signals the caller (smr::scan())
// that the array must be extended.
291 bool push( retired_ptr const& p ) CDS_NOEXCEPT
293 assert( current_block_ != nullptr );
294 assert( current_block_->first() <= current_cell_ );
295 assert( current_cell_ < current_block_->last() );
296 //assert( &p != current_cell_ );
299 if ( ++current_cell_ == current_block_->last() ) {
300 // goto next block if exists
301 if ( current_block_->next_ ) {
302 current_block_ = current_block_->next_;
303 current_cell_ = current_block_->first();
308 // smr::scan() extend retired_array if needed
// safe_push(): pointer-taking variant that forwards to push( *p ).
315 bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
317 bool ret = push( *p );
322 private: // called by smr
// Lazy init: allocates the first retired_block from the global allocator.
325 if ( list_head_ == nullptr ) {
326 retired_block* block = retired_allocator::instance().alloc();
327 assert( block->next_ == nullptr );
332 current_cell_ = block->first();
// Teardown: walks the block list, returning each block to the global
// retired_allocator, then resets all cursors.
340 retired_allocator& alloc = retired_allocator::instance();
341 for ( retired_block* p = list_head_; p; ) {
342 retired_block* next = p->next_;
349 list_tail_ = nullptr;
350 current_cell_ = nullptr;
// extend(): appends a fresh block at the tail when all blocks are full
// (only valid when the cursor sits at the very end -- see asserts).
357 assert( list_head_ != nullptr );
358 assert( current_block_ == list_tail_ );
359 assert( current_cell_ == current_block_->last() );
361 retired_block* block = retired_allocator::instance().alloc();
362 assert( block->next_ == nullptr );
364 list_tail_ = list_tail_->next_ = block;
365 current_cell_ = block->first();
// empty(): true when nothing has been retired yet.
371 return current_block_ == nullptr
372 || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
376 retired_block* current_block_;
377 retired_ptr* current_cell_; // in current_block_
379 retired_block* list_head_;
380 retired_block* list_tail_;
// NOTE(review): interior of struct thread_data -- the struct head and several
// lines are lost in this damaged extraction (original line numbers are fused
// into the code).
388 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
389 retired_array retired_; ///< Retired data private to the thread
// pad1_/pad2_ (cds::c_nCacheLineSize bytes each) isolate sync_ on its own
// cache line, keeping the counter away from the surrounding members.
391 char pad1_[cds::c_nCacheLineSize];
392 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
393 char pad2_[cds::c_nCacheLineSize];
395 // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
396 // cppcheck-suppress uninitMemberVar
// Ctor: hands the thread's pre-allocated guard array to hazards_.
397 thread_data( guard* guards, size_t guard_count )
398 : hazards_( guards, guard_count )
402 thread_data() = delete;
403 thread_data( thread_data const& ) = delete;
404 thread_data( thread_data&& ) = delete;
// sync(): acq_rel RMW on the dummy counter -- its only purpose is the
// synchronizes-with edge between threads (see sync_ above).
408 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
414 // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
// NOTE(review): interior of class smr -- the class head and many lines are
// missing in this damaged extraction (original line numbers fused into code).
417 struct thread_record;
420 /// Returns the instance of Hazard Pointer \ref smr
421 static smr& instance()
423 # ifdef CDS_DISABLE_SMR_EXCEPTION
424 assert( instance_ != nullptr );
// With exceptions enabled, using the SMR before construct() throws.
427 CDS_THROW_EXCEPTION( not_initialized() );
432 /// Creates Dynamic Hazard Pointer SMR singleton
434 Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
435 Otherwise it does nothing.
437 The Michael's HP reclamation schema depends on three parameters:
438 - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from
439 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
440 the function uses maximum of HP count for CDS library
441 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
442 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
443 <tt> nHazardPtrCount * nMaxThreadCount </tt>
444 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
446 static CDS_EXPORT_API void construct(
447 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
450 // for back-compatibility
451 static void Construct(
452 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
455 construct( nInitialHazardPtrCount );
458 /// Destroys global instance of \ref smr
460 The parameter \p bDetachAll should be used carefully: if its value is \p true,
461 then the object destroyed automatically detaches all attached threads. This feature
462 can be useful when you have no control over the thread termination, for example,
463 when \p libcds is injected into existing external thread.
465 static CDS_EXPORT_API void destruct(
466 bool bDetachAll = false ///< Detach all threads
469 // for back-compatibility
470 static void Destruct(
471 bool bDetachAll = false ///< Detach all threads
474 destruct( bDetachAll );
477 /// Checks if global SMR object is constructed and may be used
478 static bool isUsed() CDS_NOEXCEPT
480 return instance_ != nullptr;
483 /// Set memory management functions
485 @note This function may be called <b>BEFORE</b> creating an instance
486 of Dynamic Hazard Pointer SMR
488 SMR object allocates some memory for thread-specific data and for
490 By default, a standard \p new and \p delete operators are used for this.
492 static CDS_EXPORT_API void set_memory_allocator(
493 void* ( *alloc_func )( size_t size ),
494 void( *free_func )( void * p )
497 /// Returns thread-local data for the current thread
498 static CDS_EXPORT_API thread_data* tls();
500 static CDS_EXPORT_API void attach_thread();
501 static CDS_EXPORT_API void detach_thread();
503 public: // for internal use only
504 /// The main garbage collecting function
505 CDS_EXPORT_API void scan( thread_data* pRec );
507 /// Helper scan routine
509 The function guarantees that every node that is eligible for reuse is eventually freed, barring
510 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
511 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
512 to thread's list of reclaimed pointers.
514 The function is called internally by \p scan().
516 CDS_EXPORT_API void help_scan( thread_data* pThis );
// Accessors for the allocator singletons owned by smr; these back
// hp_allocator::instance() / retired_allocator::instance() defined below.
518 hp_allocator& get_hp_allocator()
520 return hp_allocator_;
523 retired_allocator& get_retired_allocator()
525 return retired_allocator_;
529 CDS_EXPORT_API explicit smr(
530 size_t nInitialHazardPtrCount
533 CDS_EXPORT_API ~smr();
535 CDS_EXPORT_API void detach_all_thread();
538 CDS_EXPORT_API thread_record* create_thread_data();
539 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
541 /// Allocates Hazard Pointer SMR thread private data
542 CDS_EXPORT_API thread_record* alloc_thread_data();
544 /// Free HP SMR thread-private data
545 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
548 static CDS_EXPORT_API smr* instance_;
550 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
551 size_t const initial_hazard_count_; ///< initial number of hazard pointers per thread
552 hp_allocator hp_allocator_;
553 retired_allocator retired_allocator_;
556 std::atomic<size_t> last_plist_size_; ///< HP array size in last scan() call
561 // for backward compatibility
562 typedef smr GarbageCollector;
566 inline hp_allocator& hp_allocator::instance()
568 return smr::instance().get_hp_allocator();
571 inline retired_allocator& retired_allocator::instance()
573 return smr::instance().get_retired_allocator();
580 /// Dynamic (adaptive) Hazard Pointer SMR
581 /** @ingroup cds_garbage_collector
583 Implementation of Dynamic (adaptive) Hazard Pointer SMR
586 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
587 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
588 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
590 %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
592 See \ref cds_how_to_use "How to use" section for details how to apply SMR.
// NOTE(review): the interior of class DHP follows; the class head itself was
// lost in this damaged extraction (line numbers are fused into the code).
597 /// Native guarded pointer type
598 typedef void* guarded_pointer;
// Aliases mapping the containers' atomic requirements onto the atomics backend.
601 template <typename T> using atomic_ref = atomics::atomic<T *>;
605 @headerfile cds/gc/dhp.h
607 template <typename T> using atomic_type = atomics::atomic<T>;
609 /// Atomic marked pointer
610 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
613 /// Dynamic Hazard Pointer guard
// NOTE(review): class Guard -- damaged extraction; the class head, many braces
// and some bodies are missing, and the original line numbers are fused into
// each line. Comments document only the visible logic.
615 A guard is a hazard pointer.
616 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
618 \p %Guard object is movable but not copyable.
620 The guard object can be in two states:
621 - unlinked - the guard is not linked with any internal hazard pointer.
622 In this state no operation except \p link() and move assignment is supported.
623 - linked (default) - the guard allocates an internal hazard pointer and fully operable.
625 Due to performance reason the implementation does not check state of the guard in runtime.
627 @warning Move assignment can transfer the guard in unlinked state, use with care.
632 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
634 : guard_( dhp::smr::tls()->hazards_.alloc() )
637 /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
638 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
642 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
643 Guard( Guard&& src ) CDS_NOEXCEPT
644 : guard_( src.guard_ )
646 src.guard_ = nullptr;
649 /// Move assignment: the internal guards are swapped between \p src and \p this
651 @warning \p src will become in unlinked state if \p this was unlinked on entry.
653 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
655 std::swap( guard_, src.guard_ );
659 /// Copy ctor is prohibited - the guard is not copyable
660 Guard( Guard const& ) = delete;
662 /// Copy assignment is prohibited
663 Guard& operator=( Guard const& ) = delete;
665 /// Frees the internal hazard pointer if the guard is in linked state
671 /// Checks if the guard object linked with any internal hazard pointer
672 bool is_linked() const
674 return guard_ != nullptr;
677 /// Links the guard with internal hazard pointer if the guard is in unlinked state
681 guard_ = dhp::smr::tls()->hazards_.alloc();
684 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
688 dhp::smr::tls()->hazards_.free( guard_ );
693 /// Protects a pointer of type <tt> atomic<T*> </tt>
695 Return the value of \p toGuard
697 The function tries to load \p toGuard and to store it
698 to the HP slot repeatedly until the guard's value equals \p toGuard
// Classic HP protect loop: publish the loaded value, then re-load and retry
// until the published value and the freshly loaded value agree.
700 template <typename T>
701 T protect( atomics::atomic<T> const& toGuard )
703 assert( guard_ != nullptr );
705 T pCur = toGuard.load(atomics::memory_order_acquire);
708 pRet = assign( pCur );
709 pCur = toGuard.load(atomics::memory_order_acquire);
710 } while ( pRet != pCur );
714 /// Protects a converted pointer of type <tt> atomic<T*> </tt>
716 Return the value of \p toGuard
718 The function tries to load \p toGuard and to store result of \p f functor
719 to the HP slot repeatedly until the guard's value equals \p toGuard.
721 The function is useful for intrusive containers when \p toGuard is a node pointer
722 that should be converted to a pointer to the value type before guarding.
723 The parameter \p f of type Func is a functor that makes this conversion:
726 value_type * operator()( T * p );
729 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
731 template <typename T, class Func>
732 T protect( atomics::atomic<T> const& toGuard, Func f )
734 assert( guard_ != nullptr );
736 T pCur = toGuard.load(atomics::memory_order_acquire);
741 pCur = toGuard.load(atomics::memory_order_acquire);
742 } while ( pRet != pCur );
746 /// Store \p p to the guard
748 The function is just an assignment, no loop is performed.
749 Can be used for a pointer that cannot be changed concurrently
750 or for already guarded pointer.
752 template <typename T>
755 assert( guard_ != nullptr );
// sync() after publishing the pointer creates the synchronizes-with edge
// the scanning thread relies on (see thread_data::sync()).
758 dhp::smr::tls()->sync();
763 std::nullptr_t assign( std::nullptr_t )
765 assert( guard_ != nullptr );
772 /// Store marked pointer \p p to the guard
774 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
775 Can be used for a marked pointer that cannot be changed concurrently
776 or for already guarded pointer.
778 template <typename T, int BITMASK>
779 T* assign( cds::details::marked_ptr<T, BITMASK> p )
781 return assign( p.ptr());
784 /// Copy from \p src guard to \p this guard
785 void copy( Guard const& src )
787 assign( src.get_native());
790 /// Clears value of the guard
793 assert( guard_ != nullptr );
798 /// Gets the value currently protected (relaxed read)
799 template <typename T>
802 assert( guard_ != nullptr );
803 return guard_->get_as<T>();
806 /// Gets native guarded pointer stored
807 void* get_native() const
809 assert( guard_ != nullptr );
810 return guard_->get();
// release()/guard_ref(): internal hooks used by guarded_ptr to take over
// ownership of the raw dhp::guard.
814 dhp::guard* release()
816 dhp::guard* g = guard_;
821 dhp::guard*& guard_ref()
833 /// Array of Dynamic Hazard Pointer guards
// NOTE(review): class GuardArray<Count> -- damaged extraction; the class head,
// braces and some bodies are missing, and the original line numbers are fused
// into each line. Comments document only the visible logic.
835 The class is intended for allocating an array of hazard pointer guards.
836 Template parameter \p Count defines the size of the array.
838 A \p %GuardArray object is not copy- and move-constructible
839 and not copy- and move-assignable.
841 template <size_t Count>
845 /// Rebind array for other size \p OtherCount
846 template <size_t OtherCount>
848 typedef GuardArray<OtherCount> other ; ///< rebinding result
852 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
855 /// Default ctor allocates \p Count hazard pointers
858 dhp::smr::tls()->hazards_.alloc( guards_ );
861 /// Move ctor is prohibited
862 GuardArray( GuardArray&& ) = delete;
864 /// Move assignment is prohibited
865 GuardArray& operator=( GuardArray&& ) = delete;
867 /// Copy ctor is prohibited
868 GuardArray( GuardArray const& ) = delete;
870 /// Copy assignment is prohibited
871 GuardArray& operator=( GuardArray const& ) = delete;
873 /// Frees allocated hazard pointers
876 dhp::smr::tls()->hazards_.free( guards_ );
879 /// Protects a pointer of type \p atomic<T*>
881 Return the value of \p toGuard
883 The function tries to load \p toGuard and to store it
884 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
// Same publish-then-verify HP loop as Guard::protect(), applied to one slot.
886 template <typename T>
887 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
889 assert( nIndex < capacity() );
893 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
894 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
899 /// Protects a pointer of type \p atomic<T*>
901 Return the value of \p toGuard
903 The function tries to load \p toGuard and to store it
904 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
906 The function is useful for intrusive containers when \p toGuard is a node pointer
907 that should be converted to a pointer to the value type before guarding.
908 The parameter \p f of type Func is a functor to make that conversion:
911 value_type * operator()( T * p );
914 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
916 template <typename T, class Func>
917 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
919 assert( nIndex < capacity() );
923 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
924 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
929 /// Store \p p to the slot \p nIndex
931 The function is just an assignment, no loop is performed.
933 template <typename T>
934 T * assign( size_t nIndex, T * p )
936 assert( nIndex < capacity() );
938 guards_.set( nIndex, p );
// sync() after publishing -- see thread_data::sync().
939 dhp::smr::tls()->sync();
943 /// Store marked pointer \p p to the guard
945 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
946 Can be used for a marked pointer that cannot be changed concurrently
947 or for already guarded pointer.
949 template <typename T, int Bitmask>
950 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
952 return assign( nIndex, p.ptr());
955 /// Copy guarded value from \p src guard to slot at index \p nIndex
956 void copy( size_t nIndex, Guard const& src )
958 assign( nIndex, src.get_native());
961 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
962 void copy( size_t nDestIndex, size_t nSrcIndex )
964 assign( nDestIndex, get_native( nSrcIndex ));
967 /// Clear value of the slot \p nIndex
968 void clear( size_t nIndex )
970 guards_.clear( nIndex );
973 /// Get current value of slot \p nIndex
974 template <typename T>
975 T * get( size_t nIndex ) const
977 assert( nIndex < capacity() );
978 return guards_[nIndex]->template get_as<T>();
981 /// Get native guarded pointer stored
982 guarded_pointer get_native( size_t nIndex ) const
984 assert( nIndex < capacity() );
985 return guards_[nIndex]->get();
// release(): internal hook -- gives up ownership of one slot's raw guard.
989 dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
991 return guards_.release( nIndex );
995 /// Capacity of the guard array
996 static CDS_CONSTEXPR size_t capacity()
1003 dhp::guard_array<c_nCapacity> guards_;
// NOTE(review): class guarded_ptr -- damaged extraction; the class head,
// braces and several bodies are missing, and the original line numbers are
// fused into each line. Comments document only the visible logic.
1009 A guarded pointer is a pair of a pointer and GC's guard.
1010 Usually, it is used for returning a pointer to the item from an lock-free container.
1011 The guard prevents the pointer to be early disposed (freed) by GC.
1012 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1015 - \p GuardedType - a type which the guard stores
1016 - \p ValueType - a value type
1017 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1019 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1020 In such case the \p %guarded_ptr is:
1022 typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1025 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1033 struct value_accessor {
1034 std::string* operator()( foo* pFoo ) const
1036 return &(pFoo->value);
1041 typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1044 You don't need use this class directly.
1045 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1047 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
// trivial_cast: identity conversion used when Cast == void (see value_cast).
1051 struct trivial_cast {
1052 ValueType * operator()( GuardedType * p ) const
// friendship lets the converting move-ctor below reach gp.guard_ of a
// differently-parameterized guarded_ptr.
1058 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1062 typedef GuardedType guarded_type; ///< Guarded type
1063 typedef ValueType value_type; ///< Value type
1065 /// Functor for casting \p guarded_type to \p value_type
1066 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1069 /// Creates empty guarded pointer
1070 guarded_ptr() CDS_NOEXCEPT
1075 explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1079 /// Initializes guarded pointer with \p p
1080 explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1085 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
// Move ctor: steals the raw guard; the source becomes empty.
1091 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1092 : guard_( gp.guard_ )
1094 gp.guard_ = nullptr;
1098 template <typename GT, typename VT, typename C>
1099 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1100 : guard_( gp.guard_ )
1102 gp.guard_ = nullptr;
1105 /// Ctor from \p Guard
1106 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1107 : guard_( g.release())
1110 /// The guarded pointer is not copy-constructible
1111 guarded_ptr( guarded_ptr const& gp ) = delete;
1113 /// Clears the guarded pointer
1115 \ref release is called if guarded pointer is not \ref empty
1117 ~guarded_ptr() CDS_NOEXCEPT
1122 /// Move-assignment operator
1123 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1125 std::swap( guard_, gp.guard_ );
1129 /// Move-assignment from \p Guard
1130 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1132 std::swap( guard_, g.guard_ref());
1136 /// The guarded pointer is not copy-assignable
1137 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1139 /// Returns a pointer to guarded value
1140 value_type * operator ->() const CDS_NOEXCEPT
1143 return value_cast()( guard_->get_as<guarded_type>() );
1146 /// Returns a reference to guarded value
1147 value_type& operator *() CDS_NOEXCEPT
1150 return *value_cast()( guard_->get_as<guarded_type>() );
1153 /// Returns const reference to guarded value
1154 value_type const& operator *() const CDS_NOEXCEPT
1157 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1160 /// Checks if the guarded pointer is \p nullptr
1161 bool empty() const CDS_NOEXCEPT
1163 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1166 /// \p bool operator returns <tt>!empty()</tt>
1167 explicit operator bool() const CDS_NOEXCEPT
1172 /// Clears guarded pointer
1174 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1175 Dereferencing the guarded pointer after \p release() is dangerous.
1177 void release() CDS_NOEXCEPT
1183 // For internal use only!!!
1184 void reset(guarded_type * p) CDS_NOEXCEPT
// Internal helpers: lazily (re)acquire or return the thread-local guard.
1198 guard_ = dhp::smr::tls()->hazards_.alloc();
1204 dhp::smr::tls()->hazards_.free( guard_ );
// NOTE(review): remaining interior of class DHP -- damaged extraction; braces,
// signatures and parts of bodies are missing, and the original line numbers
// are fused into each line. Comments document only the visible logic.
1217 /// Initializes %DHP memory manager singleton
1219 Constructor creates and initializes %DHP global object.
1220 %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
1221 it is created in the beginning of \p main() function.
1222 After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.
1224 \p nInitialThreadGuardCount - initial count of guard allocated for each thread.
1225 When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
1226 By perforce the local thread's guard pool is grown automatically from common pool.
1227 When the thread terminated its guard pool is backed to common GC's pool.
1230 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
1233 dhp::smr::construct( nInitialHazardPtrCount );
1236 /// Destroys %DHP memory manager
1238 The destructor destroys %DHP global object. After calling of this function you may \b NOT
1239 use CDS data structures based on \p %cds::gc::DHP.
1240 Usually, %DHP object is destroyed at the end of your \p main().
1244 dhp::GarbageCollector::destruct( true );
1247 /// Checks if count of hazard pointer is no less than \p nCountNeeded
1249 The function always returns \p true since the guard count is unlimited for
1250 \p %gc::DHP garbage collector.
1252 static CDS_CONSTEXPR bool check_available_guards(
1253 #ifdef CDS_DOXYGEN_INVOKED
1254 size_t nCountNeeded,
1263 /// Set memory management functions
1265 @note This function may be called <b>BEFORE</b> creating an instance
1266 of Dynamic Hazard Pointer SMR
1268 SMR object allocates some memory for thread-specific data and for creating SMR object.
1269 By default, a standard \p new and \p delete operators are used for this.
1271 static void set_memory_allocator(
1272 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1273 void( *free_func )( void * p ) ///< \p free() function
1276 dhp::smr::set_memory_allocator( alloc_func, free_func );
1279 /// Retire pointer \p p with function \p func
1281 The function places pointer \p p to array of pointers ready for removing.
1282 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1283 \p func is a disposer: when \p p can be safely removed, \p func is called.
// A failed push means the thread-private retired array is full; scan()
// reclaims what it can and extends the array.
1285 template <typename T>
1286 static void retire( T * p, void (* func)(T *))
1288 dhp::thread_data* rec = dhp::smr::tls();
1289 if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
1290 dhp::smr::instance().scan( rec );
1293 /// Retire pointer \p p with functor of type \p Disposer
1295 The function places pointer \p p to array of pointers ready for removing.
1296 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1298 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1300 template <typename T>
1302 void operator()( T * p ) ; // disposing operator
1305 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1306 - it should be stateless functor
1307 - it should be default-constructible
1308 - the result of functor call with argument \p p should not depend on where the functor will be called.
1311 Operator \p delete functor:
1313 template <typename T>
1315 void operator ()( T * p ) {
1320 // How to call HP::retire method
1323 // ... use p in lock-free manner
1325 cds::gc::DHP::retire<disposer>( p ) ; // place p to retired pointer array of DHP SMR
1328 Functor based on \p std::allocator :
1330 template <typename Alloc = std::allocator<int> >
1332 template <typename T>
1333 void operator()( T * p ) {
1334 typedef typename Alloc::template rebind<T>::other alloc_t;
1337 a.deallocate( p, 1 );
// static_functor wraps the stateless Disposer into a plain function pointer
// suitable for retired_ptr.
1342 template <class Disposer, typename T>
1343 static void retire( T * p )
1345 if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1349 /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
1350 static bool isUsed()
1352 return dhp::smr::isUsed();
1355 /// Forced GC cycle call for current thread
1357 Usually, this function should not be called directly.
1361 dhp::smr::instance().scan( dhp::smr::tls() );
1364 /// Synonym for \p scan()
1365 static void force_dispose()
1371 }} // namespace cds::gc
1373 #endif // #ifndef CDSLIB_GC_DHP_SMR_H