2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/intrusive/free_list_selector.h>
37 #include <cds/details/throw_exception.h>
38 #include <cds/details/static_functor.h>
39 #include <cds/details/marked_ptr.h>
40 #include <cds/user_setup/cache_line.h>
42 namespace cds { namespace gc {
44 using namespace cds::gc::hp::common;
/// Exception "Dynamic Hazard Pointer SMR is not initialized"
class not_initialized: public std::runtime_error
{
public:
    // Carries a fixed diagnostic message; thrown when smr::instance()
    // is called before smr::construct().
    not_initialized()
        : std::runtime_error( "Global DHP SMR object is not initialized" )
    {}
};
55 struct guard_block: public cds::intrusive::FreeListImpl::node
57 guard_block* next_; // next block in the thread list
65 return reinterpret_cast<guard*>( this + 1 );
69 /// \p guard_block allocator (global object)
74 static hp_allocator& instance();
76 CDS_EXPORT_API guard_block* alloc();
77 void free( guard_block* block )
79 free_list_.put( block );
85 CDS_EXPORT_API ~hp_allocator();
88 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
91 /// Per-thread hazard pointer storage
92 class thread_hp_storage
96 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
98 , extended_list_( nullptr )
100 , initial_capacity_( nSize )
103 new( arr ) guard[nSize];
106 thread_hp_storage() = delete;
107 thread_hp_storage( thread_hp_storage const& ) = delete;
108 thread_hp_storage( thread_hp_storage&& ) = delete;
117 if ( cds_unlikely( free_head_ == nullptr )) {
119 assert( free_head_ != nullptr );
122 guard* g = free_head_;
123 free_head_ = g->next_;
127 void free( guard* g ) CDS_NOEXCEPT
131 g->next_ = free_head_;
136 template< size_t Capacity>
137 size_t alloc( guard_array<Capacity>& arr )
139 for ( size_t i = 0; i < Capacity; ++i ) {
140 if ( cds_unlikely( free_head_ == nullptr ))
142 arr.reset( i, free_head_ );
143 free_head_ = free_head_->next_;
148 template <size_t Capacity>
149 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
151 guard* gList = free_head_;
152 for ( size_t i = 0; i < Capacity; ++i ) {
166 for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
169 // free all extended blocks
170 hp_allocator& alloc = hp_allocator::instance();
171 for ( guard_block* p = extended_list_; p; ) {
172 guard_block* next = p->next_;
177 extended_list_ = nullptr;
182 assert( extended_list_ == nullptr );
185 for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
194 assert( free_head_ == nullptr );
196 guard_block* block = hp_allocator::instance().alloc();
197 block->next_ = extended_list_;
198 extended_list_ = block;
199 free_head_ = block->first();
203 guard* free_head_; ///< Head of free guard list
204 guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
205 guard* const array_; ///< initial HP array
206 size_t const initial_capacity_; ///< Capacity of \p array_
209 struct retired_block: public cds::intrusive::FreeListImpl::node
211 retired_block* next_; ///< Next block in thread-private retired array
213 static size_t const c_capacity = 256;
219 retired_ptr* first() const
221 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
224 retired_ptr* last() const
226 return first() + c_capacity;
230 class retired_allocator
234 static retired_allocator& instance();
236 CDS_EXPORT_API retired_block* alloc();
237 void free( retired_block* block )
239 block->next_ = nullptr;
240 free_list_.put( block );
246 CDS_EXPORT_API ~retired_allocator();
249 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
252 /// Per-thread retired array
257 retired_array() CDS_NOEXCEPT
258 : current_block_( nullptr )
259 , current_cell_( nullptr )
260 , list_head_( nullptr )
261 , list_tail_( nullptr )
265 retired_array( retired_array const& ) = delete;
266 retired_array( retired_array&& ) = delete;
274 bool push( retired_ptr const& p ) CDS_NOEXCEPT
276 assert( current_block_ != nullptr );
277 assert( current_block_->first() <= current_cell_ );
278 assert( current_cell_ < current_block_->last() );
279 //assert( &p != current_cell_ );
282 if ( ++current_cell_ == current_block_->last() ) {
283 // goto next block if exists
284 if ( current_block_->next_ ) {
285 current_block_ = current_block_->next_;
286 current_cell_ = current_block_->first();
291 // smr::scan() extend retired_array if needed
298 bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
300 bool ret = push( *p );
305 private: // called by smr
308 if ( list_head_ == nullptr ) {
309 retired_block* block = retired_allocator::instance().alloc();
310 assert( block->next_ == nullptr );
315 current_cell_ = block->first();
323 retired_allocator& alloc = retired_allocator::instance();
324 for ( retired_block* p = list_head_; p; ) {
325 retired_block* next = p->next_;
332 list_tail_ = nullptr;
333 current_cell_ = nullptr;
340 assert( list_head_ != nullptr );
341 assert( current_block_ == list_tail_ );
342 assert( current_cell_ == current_block_->last() );
344 retired_block* block = retired_allocator::instance().alloc();
345 assert( block->next_ == nullptr );
347 list_tail_ = list_tail_->next_ = block;
348 current_cell_ = block->first();
354 return current_block_ == nullptr
355 || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
359 retired_block* current_block_;
360 retired_ptr* current_cell_; // in current_block_
362 retired_block* list_head_;
363 retired_block* list_tail_;
369 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
370 retired_array retired_; ///< Retired data private to the thread
372 char pad1_[cds::c_nCacheLineSize];
373 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
374 char pad2_[cds::c_nCacheLineSize];
376 // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
377 // cppcheck-suppress uninitMemberVar
378 thread_data( guard* guards, size_t guard_count )
379 : hazards_( guards, guard_count )
383 thread_data() = delete;
384 thread_data( thread_data const& ) = delete;
385 thread_data( thread_data&& ) = delete;
389 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
393 // Hazard Pointer SMR (Safe Memory Reclamation)
396 struct thread_record;
399 /// Returns the instance of Hazard Pointer \ref smr
400 static smr& instance()
402 # ifdef CDS_DISABLE_SMR_EXCEPTION
403 assert( instance_ != nullptr );
406 CDS_THROW_EXCEPTION( not_initialized() );
411 /// Creates Dynamic Hazard Pointer SMR singleton
413 Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
414 Otherwise it does nothing.
416 The Michael's HP reclamation schema depends of three parameters:
417 - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from
418 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
419 the function uses maximum of HP count for CDS library
420 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
421 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
422 <tt> nHazardPtrCount * nMaxThreadCount </tt>
423 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
425 static CDS_EXPORT_API void construct(
426 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
430 // for back-copatibility
431 static void Construct(
432 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
435 construct( nInitialHazardPtrCount );
439 /// Destroys global instance of \ref smr
441 The parameter \p bDetachAll should be used carefully: if its value is \p true,
442 then the object destroyed automatically detaches all attached threads. This feature
443 can be useful when you have no control over the thread termination, for example,
444 when \p libcds is injected into existing external thread.
446 static CDS_EXPORT_API void destruct(
447 bool bDetachAll = false ///< Detach all threads
451 // for back-copatibility
452 static void Destruct(
453 bool bDetachAll = false ///< Detach all threads
456 destruct( bDetachAll );
460 /// Checks if global SMR object is constructed and may be used
461 static bool isUsed() CDS_NOEXCEPT
463 return instance_ != nullptr;
466 /// Set memory management functions
468 @note This function may be called <b>BEFORE</b> creating an instance
469 of Dynamic Hazard Pointer SMR
471 SMR object allocates some memory for thread-specific data and for
473 By default, a standard \p new and \p delete operators are used for this.
475 static CDS_EXPORT_API void set_memory_allocator(
476 void* ( *alloc_func )( size_t size ),
477 void( *free_func )( void * p )
480 /// Returns thread-local data for the current thread
481 static CDS_EXPORT_API thread_data* tls();
483 static CDS_EXPORT_API void attach_thread();
484 static CDS_EXPORT_API void detach_thread();
486 public: // for internal use only
487 /// The main garbage collecting function
488 CDS_EXPORT_API void scan( thread_data* pRec );
490 /// Helper scan routine
492 The function guarantees that every node that is eligible for reuse is eventually freed, barring
493 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
494 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
495 to thread's list of reclaimed pointers.
497 The function is called internally by \p scan().
499 CDS_EXPORT_API void help_scan( thread_data* pThis );
501 hp_allocator& get_hp_allocator()
503 return hp_allocator_;
506 retired_allocator& get_retired_allocator()
508 return retired_allocator_;
512 CDS_EXPORT_API explicit smr(
513 size_t nInitialHazardPtrCount
516 CDS_EXPORT_API ~smr();
518 CDS_EXPORT_API void detach_all_thread();
522 CDS_EXPORT_API thread_record* create_thread_data();
523 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
525 /// Allocates Hazard Pointer SMR thread private data
526 CDS_EXPORT_API thread_record* alloc_thread_data();
528 /// Free HP SMR thread-private data
529 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
533 static CDS_EXPORT_API smr* instance_;
535 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
536 size_t const initial_hazard_count_; ///< initial number of hazard pointers per thread
537 hp_allocator hp_allocator_;
538 retired_allocator retired_allocator_;
541 std::atomic<size_t> last_plist_size_; ///< HP array size in last scan() call
544 // for backward compatibility
545 typedef smr GarbageCollector;
549 inline hp_allocator& hp_allocator::instance()
551 return smr::instance().get_hp_allocator();
554 inline retired_allocator& retired_allocator::instance()
556 return smr::instance().get_retired_allocator();
562 /// Dynamic Hazard Pointer garbage collector
563 /** @ingroup cds_garbage_collector
564 @headerfile cds/gc/dhp.h
566 Implementation of Dynamic Hazard Pointer garbage collector.
- [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
570 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
- [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
Dynamic Hazard Pointers SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
unlike classic Hazard Pointer SMR in which the count of hazard pointers per thread is limited.
576 See \ref cds_how_to_use "How to use" section for details how to apply garbage collector.
581 /// Native guarded pointer type
582 typedef void* guarded_pointer;
585 template <typename T> using atomic_ref = atomics::atomic<T *>;
589 @headerfile cds/gc/dhp.h
591 template <typename T> using atomic_type = atomics::atomic<T>;
593 /// Atomic marked pointer
594 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
597 /// Dynamic Hazard Pointer guard
599 A guard is a hazard pointer.
600 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
602 \p %Guard object is movable but not copyable.
604 The guard object can be in two states:
605 - unlinked - the guard is not linked with any internal hazard pointer.
606 In this state no operation except \p link() and move assignment is supported.
607 - linked (default) - the guard allocates an internal hazard pointer and fully operable.
609 Due to performance reason the implementation does not check state of the guard in runtime.
611 @warning Move assignment can transfer the guard in unlinked state, use with care.
616 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
618 : guard_( dhp::smr::tls()->hazards_.alloc() )
621 /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
622 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
626 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
627 Guard( Guard&& src ) CDS_NOEXCEPT
628 : guard_( src.guard_ )
630 src.guard_ = nullptr;
633 /// Move assignment: the internal guards are swapped between \p src and \p this
635 @warning \p src will become in unlinked state if \p this was unlinked on entry.
637 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
639 std::swap( guard_, src.guard_ );
643 /// Copy ctor is prohibited - the guard is not copyable
644 Guard( Guard const& ) = delete;
646 /// Copy assignment is prohibited
647 Guard& operator=( Guard const& ) = delete;
649 /// Frees the internal hazard pointer if the guard is in linked state
655 /// Checks if the guard object linked with any internal hazard pointer
656 bool is_linked() const
658 return guard_ != nullptr;
661 /// Links the guard with internal hazard pointer if the guard is in unlinked state
665 guard_ = dhp::smr::tls()->hazards_.alloc();
668 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
672 dhp::smr::tls()->hazards_.free( guard_ );
677 /// Protects a pointer of type <tt> atomic<T*> </tt>
679 Return the value of \p toGuard
681 The function tries to load \p toGuard and to store it
682 to the HP slot repeatedly until the guard's value equals \p toGuard
684 template <typename T>
685 T protect( atomics::atomic<T> const& toGuard )
687 assert( guard_ != nullptr );
689 T pCur = toGuard.load(atomics::memory_order_acquire);
692 pRet = assign( pCur );
693 pCur = toGuard.load(atomics::memory_order_acquire);
694 } while ( pRet != pCur );
698 /// Protects a converted pointer of type <tt> atomic<T*> </tt>
700 Return the value of \p toGuard
702 The function tries to load \p toGuard and to store result of \p f functor
703 to the HP slot repeatedly until the guard's value equals \p toGuard.
705 The function is useful for intrusive containers when \p toGuard is a node pointer
706 that should be converted to a pointer to the value type before guarding.
707 The parameter \p f of type Func is a functor that makes this conversion:
710 value_type * operator()( T * p );
713 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
715 template <typename T, class Func>
716 T protect( atomics::atomic<T> const& toGuard, Func f )
718 assert( guard_ != nullptr );
720 T pCur = toGuard.load(atomics::memory_order_acquire);
725 pCur = toGuard.load(atomics::memory_order_acquire);
726 } while ( pRet != pCur );
730 /// Store \p p to the guard
732 The function is just an assignment, no loop is performed.
733 Can be used for a pointer that cannot be changed concurrently
734 or for already guarded pointer.
736 template <typename T>
739 assert( guard_ != nullptr );
742 dhp::smr::tls()->sync();
747 std::nullptr_t assign( std::nullptr_t )
749 assert( guard_ != nullptr );
756 /// Store marked pointer \p p to the guard
758 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
759 Can be used for a marked pointer that cannot be changed concurrently
760 or for already guarded pointer.
762 template <typename T, int BITMASK>
763 T* assign( cds::details::marked_ptr<T, BITMASK> p )
765 return assign( p.ptr());
768 /// Copy from \p src guard to \p this guard
769 void copy( Guard const& src )
771 assign( src.get_native());
774 /// Clears value of the guard
777 assert( guard_ != nullptr );
782 /// Gets the value currently protected (relaxed read)
783 template <typename T>
786 assert( guard_ != nullptr );
787 return guard_->get_as<T>();
790 /// Gets native guarded pointer stored
791 void* get_native() const
793 assert( guard_ != nullptr );
794 return guard_->get();
798 dhp::guard* release()
800 dhp::guard* g = guard_;
805 dhp::guard*& guard_ref()
817 /// Array of Dynamic Hazard Pointer guards
819 The class is intended for allocating an array of hazard pointer guards.
820 Template parameter \p Count defines the size of the array.
822 A \p %GuardArray object is not copy- and move-constructible
823 and not copy- and move-assignable.
825 template <size_t Count>
829 /// Rebind array for other size \p OtherCount
830 template <size_t OtherCount>
832 typedef GuardArray<OtherCount> other ; ///< rebinding result
836 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
839 /// Default ctor allocates \p Count hazard pointers
842 dhp::smr::tls()->hazards_.alloc( guards_ );
845 /// Move ctor is prohibited
846 GuardArray( GuardArray&& ) = delete;
848 /// Move assignment is prohibited
849 GuardArray& operator=( GuardArray&& ) = delete;
851 /// Copy ctor is prohibited
852 GuardArray( GuardArray const& ) = delete;
854 /// Copy assignment is prohibited
855 GuardArray& operator=( GuardArray const& ) = delete;
857 /// Frees allocated hazard pointers
860 dhp::smr::tls()->hazards_.free( guards_ );
863 /// Protects a pointer of type \p atomic<T*>
865 Return the value of \p toGuard
867 The function tries to load \p toGuard and to store it
868 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
870 template <typename T>
871 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
873 assert( nIndex < capacity() );
877 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
878 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
883 /// Protects a pointer of type \p atomic<T*>
885 Return the value of \p toGuard
887 The function tries to load \p toGuard and to store it
888 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
890 The function is useful for intrusive containers when \p toGuard is a node pointer
891 that should be converted to a pointer to the value type before guarding.
892 The parameter \p f of type Func is a functor to make that conversion:
895 value_type * operator()( T * p );
898 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
900 template <typename T, class Func>
901 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
903 assert( nIndex < capacity() );
907 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
908 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
913 /// Store \p p to the slot \p nIndex
915 The function is just an assignment, no loop is performed.
917 template <typename T>
918 T * assign( size_t nIndex, T * p )
920 assert( nIndex < capacity() );
922 guards_.set( nIndex, p );
923 dhp::smr::tls()->sync();
927 /// Store marked pointer \p p to the guard
929 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
930 Can be used for a marked pointer that cannot be changed concurrently
931 or for already guarded pointer.
933 template <typename T, int Bitmask>
934 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
936 return assign( nIndex, p.ptr());
939 /// Copy guarded value from \p src guard to slot at index \p nIndex
940 void copy( size_t nIndex, Guard const& src )
942 assign( nIndex, src.get_native());
945 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
946 void copy( size_t nDestIndex, size_t nSrcIndex )
948 assign( nDestIndex, get_native( nSrcIndex ));
951 /// Clear value of the slot \p nIndex
952 void clear( size_t nIndex )
954 guards_.clear( nIndex );
957 /// Get current value of slot \p nIndex
958 template <typename T>
959 T * get( size_t nIndex ) const
961 assert( nIndex < capacity() );
962 return guards_[nIndex]->template get_as<T>();
965 /// Get native guarded pointer stored
966 guarded_pointer get_native( size_t nIndex ) const
968 assert( nIndex < capacity() );
969 return guards_[nIndex]->get();
973 dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
975 return guards_.release( nIndex );
979 /// Capacity of the guard array
980 static CDS_CONSTEXPR size_t capacity()
987 dhp::guard_array<c_nCapacity> guards_;
993 A guarded pointer is a pair of a pointer and GC's guard.
994 Usually, it is used for returning a pointer to the item from an lock-free container.
995 The guard prevents the pointer to be early disposed (freed) by GC.
996 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
999 - \p GuardedType - a type which the guard stores
1000 - \p ValueType - a value type
1001 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1003 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1004 In such case the \p %guarded_ptr is:
1006 typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1009 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1017 struct value_accessor {
1018 std::string* operator()( foo* pFoo ) const
1020 return &(pFoo->value);
1025 typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1028 You don't need use this class directly.
1029 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1031 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1035 struct trivial_cast {
1036 ValueType * operator()( GuardedType * p ) const
1042 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1046 typedef GuardedType guarded_type; ///< Guarded type
1047 typedef ValueType value_type; ///< Value type
1049 /// Functor for casting \p guarded_type to \p value_type
1050 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1053 /// Creates empty guarded pointer
1054 guarded_ptr() CDS_NOEXCEPT
1059 explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1063 /// Initializes guarded pointer with \p p
1064 explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1069 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1075 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1076 : guard_( gp.guard_ )
1078 gp.guard_ = nullptr;
1082 template <typename GT, typename VT, typename C>
1083 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1084 : guard_( gp.guard_ )
1086 gp.guard_ = nullptr;
1089 /// Ctor from \p Guard
1090 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1091 : guard_( g.release())
1094 /// The guarded pointer is not copy-constructible
1095 guarded_ptr( guarded_ptr const& gp ) = delete;
1097 /// Clears the guarded pointer
1099 \ref release is called if guarded pointer is not \ref empty
1101 ~guarded_ptr() CDS_NOEXCEPT
1106 /// Move-assignment operator
1107 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1109 std::swap( guard_, gp.guard_ );
1113 /// Move-assignment from \p Guard
1114 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1116 std::swap( guard_, g.guard_ref());
1120 /// The guarded pointer is not copy-assignable
1121 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1123 /// Returns a pointer to guarded value
1124 value_type * operator ->() const CDS_NOEXCEPT
1127 return value_cast()( guard_->get_as<guarded_type>() );
1130 /// Returns a reference to guarded value
1131 value_type& operator *() CDS_NOEXCEPT
1134 return *value_cast()( guard_->get_as<guarded_type>() );
1137 /// Returns const reference to guarded value
1138 value_type const& operator *() const CDS_NOEXCEPT
1141 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1144 /// Checks if the guarded pointer is \p nullptr
1145 bool empty() const CDS_NOEXCEPT
1147 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1150 /// \p bool operator returns <tt>!empty()</tt>
1151 explicit operator bool() const CDS_NOEXCEPT
1156 /// Clears guarded pointer
1158 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1159 Dereferncing the guarded pointer after \p release() is dangerous.
1161 void release() CDS_NOEXCEPT
1167 // For internal use only!!!
1168 void reset(guarded_type * p) CDS_NOEXCEPT
1182 guard_ = dhp::smr::tls()->hazards_.alloc();
1188 dhp::smr::tls()->hazards_.free( guard_ );
1201 /// Initializes %DHP memory manager singleton
1203 Constructor creates and initializes %DHP global object.
1204 %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
1205 it is created in the beginning of \p main() function.
1206 After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.
1208 \p nInitialThreadGuardCount - initial count of guard allocated for each thread.
1209 When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
1210 By perforce the local thread's guard pool is grown automatically from common pool.
1211 When the thread terminated its guard pool is backed to common GC's pool.
1214 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
1217 dhp::smr::construct( nInitialHazardPtrCount );
1220 /// Destroys %DHP memory manager
1222 The destructor destroys %DHP global object. After calling of this function you may \b NOT
1223 use CDS data structures based on \p %cds::gc::DHP.
1224 Usually, %DHP object is destroyed at the end of your \p main().
1228 dhp::GarbageCollector::destruct( true );
1231 /// Checks if count of hazard pointer is no less than \p nCountNeeded
1233 The function always returns \p true since the guard count is unlimited for
1234 \p %gc::DHP garbage collector.
1236 static CDS_CONSTEXPR bool check_available_guards(
1237 #ifdef CDS_DOXYGEN_INVOKED
1238 size_t nCountNeeded,
1247 /// Set memory management functions
1249 @note This function may be called <b>BEFORE</b> creating an instance
1250 of Dynamic Hazard Pointer SMR
1252 SMR object allocates some memory for thread-specific data and for
1253 creating SMR object.
1254 By default, a standard \p new and \p delete operators are used for this.
1256 static void set_memory_allocator(
1257 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1258 void( *free_func )( void * p ) ///< \p free() function
1261 dhp::smr::set_memory_allocator( alloc_func, free_func );
1264 /// Retire pointer \p p with function \p pFunc
1266 The function places pointer \p p to array of pointers ready for removing.
1267 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1268 \p func is a disposer: when \p p can be safely removed, \p func is called.
1270 template <typename T>
1271 static void retire( T * p, void (* func)(T *))
1273 dhp::thread_data* rec = dhp::smr::tls();
1274 if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
1275 dhp::smr::instance().scan( rec );
1278 /// Retire pointer \p p with functor of type \p Disposer
1280 The function places pointer \p p to array of pointers ready for removing.
1281 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1283 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1285 template <typename T>
1287 void operator()( T * p ) ; // disposing operator
1290 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1291 - it should be stateless functor
1292 - it should be default-constructible
1293 - the result of functor call with argument \p p should not depend on where the functor will be called.
1296 Operator \p delete functor:
1298 template <typename T>
1300 void operator ()( T * p ) {
1305 // How to call HP::retire method
1308 // ... use p in lock-free manner
1310 cds::gc::DHP::retire<disposer>( p ) ; // place p to retired pointer array of DHP SMR
1313 Functor based on \p std::allocator :
1315 template <typename Alloc = std::allocator<int> >
1317 template <typename T>
1318 void operator()( T * p ) {
typedef typename Alloc::template rebind<T>::other alloc_t;
1322 a.deallocate( p, 1 );
1327 template <class Disposer, typename T>
1328 static void retire( T * p )
1330 if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1334 /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
1335 static bool isUsed()
1337 return dhp::smr::isUsed();
1340 /// Forced GC cycle call for current thread
1342 Usually, this function should not be called directly.
1346 dhp::smr::instance().scan( dhp::smr::tls() );
1349 /// Synonym for \p scan()
1350 static void force_dispose()
1356 }} // namespace cds::gc
1358 #endif // #ifndef CDSLIB_GC_DHP_SMR_H