2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_HP_SMR_H
32 #define CDSLIB_GC_HP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/throw_exception.h>
37 #include <cds/details/static_functor.h>
38 #include <cds/details/marked_ptr.h>
39 #include <cds/user_setup/cache_line.h>
41 namespace cds { namespace gc {
43 using namespace cds::gc::hp::common;
/// Exception "Not enough Hazard Pointer"
/**
    Thrown when the thread-private hazard pointer storage is exhausted,
    i.e. more guards are requested than were configured per thread.
*/
class not_enought_hazard_ptr: public std::length_error
{
public:
    //@cond
    not_enought_hazard_ptr()
        : std::length_error( "Not enough Hazard Pointer" )
    {}
    //@endcond
};
/// Exception "Hazard Pointer SMR is not initialized"
/**
    Thrown when the global SMR singleton is used before \p smr::construct()
    has been called.
*/
class not_initialized: public std::runtime_error
{
public:
    //@cond
    not_initialized()
        : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" )
    {}
    //@endcond
};
65 /// Per-thread hazard pointer storage
66 class thread_hp_storage {
68 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
72 # ifdef CDS_ENABLE_HPSTAT
73 , alloc_guard_count_(0)
74 , free_guard_count_(0)
77 for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr )
82 thread_hp_storage() = delete;
83 thread_hp_storage( thread_hp_storage const& ) = delete;
84 thread_hp_storage( thread_hp_storage&& ) = delete;
86 size_t capacity() const CDS_NOEXCEPT
91 bool full() const CDS_NOEXCEPT
93 return free_head_ == nullptr;
98 # ifdef CDS_DISABLE_SMR_EXCEPTION
102 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
104 guard* g = free_head_;
105 free_head_ = g->next_;
106 CDS_HPSTAT( ++alloc_guard_count_ );
110 void free( guard* g ) CDS_NOEXCEPT
112 assert( g >= array_ && g < array_ + capacity() );
116 g->next_ = free_head_;
118 CDS_HPSTAT( ++free_guard_count_ );
122 template< size_t Capacity>
123 size_t alloc( guard_array<Capacity>& arr )
126 guard* g = free_head_;
127 for ( i = 0; i < Capacity && g; ++i ) {
132 # ifdef CDS_DISABLE_SMR_EXCEPTION
133 assert( i == Capacity );
136 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
139 CDS_HPSTAT( alloc_guard_count_ += Capacity );
143 template <size_t Capacity>
144 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
146 guard* gList = free_head_;
147 for ( size_t i = 0; i < Capacity; ++i ) {
153 CDS_HPSTAT( ++free_guard_count_ );
161 for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur )
165 guard& operator[]( size_t idx )
167 assert( idx < capacity() );
172 static size_t calc_array_size( size_t capacity )
174 return sizeof( guard ) * capacity;
178 guard* free_head_; ///< Head of free guard list
179 guard* const array_; ///< HP array
180 size_t const capacity_; ///< HP array capacity
181 # ifdef CDS_ENABLE_HPSTAT
183 size_t alloc_guard_count_;
184 size_t free_guard_count_;
188 /// Per-thread retired array
192 retired_array( retired_ptr* arr, size_t capacity ) CDS_NOEXCEPT
194 , last_( arr + capacity )
196 # ifdef CDS_ENABLE_HPSTAT
197 , retire_call_count_(0)
201 retired_array() = delete;
202 retired_array( retired_array const& ) = delete;
203 retired_array( retired_array&& ) = delete;
205 size_t capacity() const CDS_NOEXCEPT
207 return last_ - retired_;
210 size_t size() const CDS_NOEXCEPT
212 return current_ - retired_;
215 bool push( retired_ptr&& p ) CDS_NOEXCEPT
218 CDS_HPSTAT( ++retire_call_count_ );
219 return ++current_ < last_;
222 retired_ptr* first() const CDS_NOEXCEPT
227 retired_ptr* last() const CDS_NOEXCEPT
232 void reset( size_t nSize ) CDS_NOEXCEPT
234 current_ = first() + nSize;
237 bool full() const CDS_NOEXCEPT
239 return current_ == last_;
242 static size_t calc_array_size( size_t capacity )
244 return sizeof( retired_ptr ) * capacity;
248 retired_ptr* current_;
249 retired_ptr* const last_;
250 retired_ptr* const retired_;
251 # ifdef CDS_ENABLE_HPSTAT
253 size_t retire_call_count_;
/// Internal statistics
/**
    Aggregated Hazard Pointer statistics. All counters are zero-initialized
    by the constructor via \p clear().
*/
struct stat {
    size_t  guard_allocated;    ///< Count of allocated HP guards
    size_t  guard_freed;        ///< Count of freed HP guards
    size_t  retired_count;      ///< Count of retired pointers
    size_t  free_count;         ///< Count of free pointers
    size_t  scan_count;         ///< Count of \p scan() call
    size_t  help_scan_count;    ///< Count of \p help_scan() call

    size_t  thread_rec_count;   ///< Count of thread records

    /// Default ctor zeroes all counters
    stat()
    {
        clear();
    }

    /// Resets all counters to zero
    void clear()
    {
        guard_allocated =
            guard_freed =
            retired_count =
            free_count =
            scan_count =
            help_scan_count =
            thread_rec_count = 0;
    }
};
288 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
289 retired_array retired_; ///< Retired data private to the thread
291 stat stat_; ///< Internal statistics for the thread
293 char pad1_[cds::c_nCacheLineSize];
294 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
295 char pad2_[cds::c_nCacheLineSize];
297 thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
298 : hazards_( guards, guard_count )
299 , retired_( retired_arr, retired_capacity )
303 thread_data() = delete;
304 thread_data( thread_data const& ) = delete;
305 thread_data( thread_data&& ) = delete;
309 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
/// smr::scan() strategy
enum scan_type {
    classic,    ///< classic scan as described in Michael's works (see smr::classic_scan() )
    inplace     ///< inplace scan without allocation (see smr::inplace_scan() )
};
319 // Hazard Pointer SMR (Safe Memory Reclamation)
322 struct thread_record;
325 /// Returns the instance of Hazard Pointer \ref smr
326 static smr& instance()
328 # ifdef CDS_DISABLE_SMR_EXCEPTION
329 assert( instance_ != nullptr );
332 CDS_THROW_EXCEPTION( not_initialized());
337 /// Creates Hazard Pointer SMR singleton
339 Hazard Pointer SMR is a singleton. If HP instance is not initialized then the function creates the instance.
340 Otherwise it does nothing.
342 The Michael's HP reclamation schema depends of three parameters:
343 - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from
344 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
345 the function uses maximum of HP count for CDS library
346 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
347 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
348 <tt> nHazardPtrCount * nMaxThreadCount </tt>
349 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
351 static CDS_EXPORT_API void construct(
352 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
353 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
354 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
355 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
359 // for back-copatibility
360 static void Construct(
361 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
362 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
363 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
364 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
367 construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
372 /// Destroys global instance of \ref smr
374 The parameter \p bDetachAll should be used carefully: if its value is \p true,
375 then the object destroyed automatically detaches all attached threads. This feature
376 can be useful when you have no control over the thread termination, for example,
377 when \p libcds is injected into existing external thread.
379 static CDS_EXPORT_API void destruct(
380 bool bDetachAll = false ///< Detach all threads
384 // for back-copatibility
385 static void Destruct(
386 bool bDetachAll = false ///< Detach all threads
389 destruct( bDetachAll );
393 /// Checks if global SMR object is constructed and may be used
394 static bool isUsed() CDS_NOEXCEPT
396 return instance_ != nullptr;
399 /// Set memory management functions
401 @note This function may be called <b>BEFORE</b> creating an instance
402 of Hazard Pointer SMR
404 SMR object allocates some memory for thread-specific data and for
406 By default, a standard \p new and \p delete operators are used for this.
408 static CDS_EXPORT_API void set_memory_allocator(
409 void* ( *alloc_func )( size_t size ),
410 void (*free_func )( void * p )
413 /// Returns max Hazard Pointer count per thread
414 size_t get_hazard_ptr_count() const CDS_NOEXCEPT
416 return hazard_ptr_count_;
419 /// Returns max thread count
420 size_t get_max_thread_count() const CDS_NOEXCEPT
422 return max_thread_count_;
425 /// Returns max size of retired objects array
426 size_t get_max_retired_ptr_count() const CDS_NOEXCEPT
428 return max_retired_ptr_count_;
431 /// Get current scan strategy
432 scan_type get_scan_type() const
437 /// Checks that required hazard pointer count \p nRequiredCount is less or equal then max hazard pointer count
439 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
441 static void check_hazard_ptr_count( size_t nRequiredCount )
443 if ( instance().get_hazard_ptr_count() < nRequiredCount ) {
444 # ifdef CDS_DISABLE_SMR_EXCEPTION
445 assert( false ); // not enough hazard ptr
447 CDS_THROW_EXCEPTION( not_enought_hazard_ptr() );
452 /// Returns thread-local data for the current thread
453 static CDS_EXPORT_API thread_data* tls();
455 static CDS_EXPORT_API void attach_thread();
456 static CDS_EXPORT_API void detach_thread();
458 /// Get internal statistics
459 void statistics( stat& st );
461 public: // for internal use only
462 /// The main garbage collecting function
464 This function is called internally when upper bound of thread's list of reclaimed pointers
467 There are the following scan algorithm:
468 - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
469 - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
471 Use \p set_scan_type() member function to setup appropriate scan algorithm.
473 void scan( thread_data* pRec )
475 ( this->*scan_func_ )( pRec );
478 /// Helper scan routine
480 The function guarantees that every node that is eligible for reuse is eventually freed, barring
481 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
482 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
483 to thread's list of reclaimed pointers.
485 The function is called internally by \p scan().
487 CDS_EXPORT_API void help_scan( thread_data* pThis );
491 size_t nHazardPtrCount, ///< Hazard pointer count per thread
492 size_t nMaxThreadCount, ///< Max count of simultaneous working thread in your application
493 size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread
494 scan_type nScanType ///< Scan type (see \ref scan_type enum)
497 CDS_EXPORT_API ~smr();
499 CDS_EXPORT_API void detach_all_thread();
501 /// Classic scan algorithm
502 /** @anchor hzp_gc_classic_scan
503 Classical scan algorithm as described in Michael's paper.
505 A scan includes four stages. The first stage involves scanning the array HP for non-null values.
506 Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer.
507 Only stage 1 accesses shared variables. The following stages operate only on private variables.
509 The second stage of a scan involves sorting local list of protected pointers to allow
510 binary search in the third stage.
512 The third stage of a scan involves checking each reclaimed node
513 against the pointers in local list of protected pointers. If the binary search yields
514 no match, the node is freed. Otherwise, it cannot be deleted now and must kept in thread's list
515 of reclaimed pointers.
517 The forth stage prepares new thread's private list of reclaimed pointers
518 that could not be freed during the current scan, where they remain until the next scan.
520 This algorithm allocates memory for internal HP array.
522 This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers
525 CDS_EXPORT_API void classic_scan( thread_data* pRec );
527 /// In-place scan algorithm
528 /** @anchor hzp_gc_inplace_scan
529 Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory.
530 All operations are performed in-place.
532 CDS_EXPORT_API void inplace_scan( thread_data* pRec );
536 CDS_EXPORT_API thread_record* create_thread_data();
537 CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
539 /// Allocates Hazard Pointer SMR thread private data
540 CDS_EXPORT_API thread_record* alloc_thread_data();
542 /// Free HP SMR thread-private data
543 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
548 static CDS_EXPORT_API smr* instance_;
550 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
552 size_t const hazard_ptr_count_; ///< max count of thread's hazard pointer
553 size_t const max_thread_count_; ///< max count of thread
554 size_t const max_retired_ptr_count_; ///< max count of retired ptr per thread
555 scan_type const scan_type_; ///< scan type (see \ref scan_type enum)
556 void ( smr::*scan_func_ )( thread_data* pRec );
559 // for backward compatibility
560 typedef smr GarbageCollector;
565 /// @defgroup cds_garbage_collector Garbage collectors
567 /// Hazard Pointer SMR (Safe Memory Reclamation)
568 /** @ingroup cds_garbage_collector
570 Implementation of classic Hazard Pointer garbage collector.
573 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
574 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
575 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
577 Hazard Pointer garbage collector is a singleton. The main user-level part of Hazard Pointer schema is
578 \p %cds::gc::HP class and its nested classes. Before use any HP-related class you must initialize HP
579 by constructing \p %cds::gc::HP object in beginning of your \p main().
580 See \ref cds_how_to_use "How to use" section for details how to apply SMR schema.
585 /// Native guarded pointer type
586 typedef hp::hazard_ptr guarded_pointer;
589 template <typename T> using atomic_ref = atomics::atomic<T *>;
591 /// Atomic marked pointer
592 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
595 template <typename T> using atomic_type = atomics::atomic<T>;
597 /// Exception "Not enough Hazard Pointer"
598 typedef hp::not_enought_hazard_ptr not_enought_hazard_ptr_exception;
600 /// Internal statistics
601 typedef hp::stat stat;
603 /// Hazard Pointer guard
605 A guard is a hazard pointer.
606 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
608 \p %Guard object is movable but not copyable.
610 The guard object can be in two states:
611 - unlinked - the guard is not linked with any internal hazard pointer.
612 In this state no operation except \p link() and move assignment is supported.
613 - linked (default) - the guard allocates an internal hazard pointer and completely operable.
615 Due to performance reason the implementation does not check state of the guard in runtime.
617 @warning Move assignment transfers the guard in unlinked state, use with care.
622 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
624 @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
627 : guard_( hp::smr::tls()->hazards_.alloc() )
630 /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
631 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
635 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
636 Guard( Guard&& src ) CDS_NOEXCEPT
637 : guard_( src.guard_ )
639 src.guard_ = nullptr;
642 /// Move assignment: the internal guards are swapped between \p src and \p this
644 @warning \p src will become in unlinked state if \p this was unlinked on entry.
646 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
648 std::swap( guard_, src.guard_ );
652 /// Copy ctor is prohibited - the guard is not copyable
653 Guard( Guard const& ) = delete;
655 /// Copy assignment is prohibited
656 Guard& operator=( Guard const& ) = delete;
658 /// Frees the internal hazard pointer if the guard is in linked state
664 /// Checks if the guard object linked with any internal hazard pointer
665 bool is_linked() const
667 return guard_ != nullptr;
670 /// Links the guard with internal hazard pointer if the guard is in unlinked state
672 @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer array is exhausted.
677 guard_ = hp::smr::tls()->hazards_.alloc();
680 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
684 hp::smr::tls()->hazards_.free( guard_ );
689 /// Protects a pointer of type \p atomic<T*>
691 Return the value of \p toGuard
693 The function tries to load \p toGuard and to store it
694 to the HP slot repeatedly until the guard's value equals \p toGuard
696 @warning The guad object should be in linked state, otherwise the result is undefined
698 template <typename T>
699 T protect( atomics::atomic<T> const& toGuard )
701 assert( guard_ != nullptr );
703 T pCur = toGuard.load(atomics::memory_order_acquire);
706 pRet = assign( pCur );
707 pCur = toGuard.load(atomics::memory_order_acquire);
708 } while ( pRet != pCur );
712 /// Protects a converted pointer of type \p atomic<T*>
714 Return the value of \p toGuard
716 The function tries to load \p toGuard and to store result of \p f functor
717 to the HP slot repeatedly until the guard's value equals \p toGuard.
719 The function is useful for intrusive containers when \p toGuard is a node pointer
720 that should be converted to a pointer to the value before protecting.
721 The parameter \p f of type Func is a functor that makes this conversion:
724 value_type * operator()( T * p );
727 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
729 @warning The guad object should be in linked state, otherwise the result is undefined
731 template <typename T, class Func>
732 T protect( atomics::atomic<T> const& toGuard, Func f )
734 assert( guard_ != nullptr );
736 T pCur = toGuard.load(atomics::memory_order_acquire);
741 pCur = toGuard.load(atomics::memory_order_acquire);
742 } while ( pRet != pCur );
746 /// Store \p p to the guard
748 The function equals to a simple assignment the value \p p to guard, no loop is performed.
749 Can be used for a pointer that cannot be changed concurrently or if the pointer is already
750 guarded by another guard.
752 @warning The guad object should be in linked state, otherwise the result is undefined
754 template <typename T>
757 assert( guard_ != nullptr );
760 hp::smr::tls()->sync();
765 std::nullptr_t assign( std::nullptr_t )
767 assert( guard_ != nullptr );
774 /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
775 void copy( Guard const& src )
777 assign( src.get_native());
780 /// Store marked pointer \p p to the guard
782 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
783 Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer
784 is already guarded by another guard.
786 @warning The guard object should be in linked state, otherwise the result is undefined
788 template <typename T, int BITMASK>
789 T * assign( cds::details::marked_ptr<T, BITMASK> p )
791 return assign( p.ptr());
794 /// Clear value of the guard (valid only in linked state)
800 /// Get the value currently protected (valid only in linked state)
801 template <typename T>
804 assert( guard_ != nullptr );
805 return guard_->get_as<T>();
808 /// Get native hazard pointer stored (valid only in linked state)
809 guarded_pointer get_native() const
811 assert( guard_ != nullptr );
812 return guard_->get();
818 hp::guard* g = guard_;
823 hp::guard*& guard_ref()
835 /// Array of Hazard Pointer guards
837 The class is intended for allocating an array of hazard pointer guards.
838 Template parameter \p Count defines the size of the array.
840 template <size_t Count>
844 /// Rebind array for other size \p Count2
845 template <size_t Count2>
847 typedef GuardArray<Count2> other; ///< rebinding result
851 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
854 /// Default ctor allocates \p Count hazard pointers
857 hp::smr::tls()->hazards_.alloc( guards_ );
860 /// Move ctor is prohibited
861 GuardArray( GuardArray&& ) = delete;
863 /// Move assignment is prohibited
864 GuardArray& operator=( GuardArray&& ) = delete;
866 /// Copy ctor is prohibited
867 GuardArray( GuardArray const& ) = delete;
869 /// Copy assignment is prohibited
870 GuardArray& operator=( GuardArray const& ) = delete;
872 /// Frees allocated hazard pointers
875 hp::smr::tls()->hazards_.free( guards_ );
878 /// Protects a pointer of type \p atomic<T*>
880 Return the value of \p toGuard
882 The function tries to load \p toGuard and to store it
883 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
885 template <typename T>
886 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
888 assert( nIndex < capacity());
892 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
893 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
898 /// Protects a pointer of type \p atomic<T*>
900 Return the value of \p toGuard
902 The function tries to load \p toGuard and to store it
903 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
905 The function is useful for intrusive containers when \p toGuard is a node pointer
906 that should be converted to a pointer to the value type before guarding.
907 The parameter \p f of type Func is a functor that makes this conversion:
910 value_type * operator()( T * p );
913 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
915 template <typename T, class Func>
916 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
918 assert( nIndex < capacity());
922 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
923 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
928 /// Store \p to the slot \p nIndex
930 The function equals to a simple assignment, no loop is performed.
932 template <typename T>
933 T * assign( size_t nIndex, T * p )
935 assert( nIndex < capacity() );
937 guards_.set( nIndex, p );
938 hp::smr::tls()->sync();
942 /// Store marked pointer \p p to the guard
944 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
945 Can be used for a marked pointer that cannot be changed concurrently.
947 template <typename T, int BITMASK>
948 T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
950 return assign( nIndex, p.ptr());
953 /// Copy guarded value from \p src guard to slot at index \p nIndex
954 void copy( size_t nIndex, Guard const& src )
956 assign( nIndex, src.get_native());
959 /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex
960 void copy( size_t nDestIndex, size_t nSrcIndex )
962 assign( nDestIndex, get_native( nSrcIndex ));
965 /// Clear value of the slot \p nIndex
966 void clear( size_t nIndex )
968 guards_.clear( nIndex );
971 /// Get current value of slot \p nIndex
972 template <typename T>
973 T * get( size_t nIndex ) const
975 assert( nIndex < capacity() );
976 return guards_[nIndex]->template get_as<T>();
979 /// Get native hazard pointer stored
980 guarded_pointer get_native( size_t nIndex ) const
982 assert( nIndex < capacity());
983 return guards_[nIndex]->get();
987 hp::guard* release( size_t nIndex ) CDS_NOEXCEPT
989 return guards_.release( nIndex );
993 /// Capacity of the guard array
994 static CDS_CONSTEXPR size_t capacity()
1001 hp::guard_array<c_nCapacity> guards_;
1007 A guarded pointer is a pair of a pointer and GC's guard.
1008 Usually, it is used for returning a pointer to an element of a lock-free container.
1009 The guard prevents the pointer to be early disposed (freed) by SMR.
1010 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1013 - \p GuardedType - a type which the guard stores
1014 - \p ValueType - a value type
1015 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1017 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1018 In such case the \p %guarded_ptr is:
1020 typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr;
1023 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1031 struct value_accessor {
1032 std::string* operator()( foo* pFoo ) const
1034 return &(pFoo->value);
1039 typedef cds::gc::HP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1042 You don't need use this class directly.
1043 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1045 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1049 struct trivial_cast {
1050 ValueType * operator()( GuardedType * p ) const
1056 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1060 typedef GuardedType guarded_type; ///< Guarded type
1061 typedef ValueType value_type; ///< Value type
1063 /// Functor for casting \p guarded_type to \p value_type
1064 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1067 /// Creates empty guarded pointer
1068 guarded_ptr() CDS_NOEXCEPT
1073 explicit guarded_ptr( hp::guard* g ) CDS_NOEXCEPT
1077 /// Initializes guarded pointer with \p p
1078 explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
1083 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1089 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1090 : guard_( gp.guard_ )
1092 gp.guard_ = nullptr;
1096 template <typename GT, typename VT, typename C>
1097 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1098 : guard_( gp.guard_ )
1100 gp.guard_ = nullptr;
1103 /// Ctor from \p Guard
1104 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1105 : guard_( g.release())
1108 /// The guarded pointer is not copy-constructible
1109 guarded_ptr( guarded_ptr const& gp ) = delete;
1111 /// Clears the guarded pointer
1113 \ref release() is called if guarded pointer is not \ref empty()
1115 ~guarded_ptr() CDS_NOEXCEPT
1120 /// Move-assignment operator
1121 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1123 std::swap( guard_, gp.guard_ );
1127 /// Move-assignment from \p Guard
1128 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1130 std::swap( guard_, g.guard_ref());
1134 /// The guarded pointer is not copy-assignable
1135 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1137 /// Returns a pointer to guarded value
1138 value_type * operator ->() const CDS_NOEXCEPT
1141 return value_cast()( guard_->get_as<guarded_type>());
1144 /// Returns a reference to guarded value
1145 value_type& operator *() CDS_NOEXCEPT
1148 return *value_cast()( guard_->get_as<guarded_type>());
1151 /// Returns const reference to guarded value
1152 value_type const& operator *() const CDS_NOEXCEPT
1155 return *value_cast()( guard_->get_as<guarded_type>());
1158 /// Checks if the guarded pointer is \p nullptr
1159 bool empty() const CDS_NOEXCEPT
1161 return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1164 /// \p bool operator returns <tt>!empty()</tt>
1165 explicit operator bool() const CDS_NOEXCEPT
1170 /// Clears guarded pointer
1172 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1173 Dereferncing the guarded pointer after \p release() is dangerous.
1175 void release() CDS_NOEXCEPT
1181 // For internal use only!!!
1182 void reset(guarded_type * p) CDS_NOEXCEPT
1195 guard_ = hp::smr::tls()->hazards_.alloc();
1201 hp::smr::tls()->hazards_.free( guard_ );
1215 enum class scan_type {
1216 classic = hp::classic, ///< classic scan as described in Michael's papers
1217 inplace = hp::inplace ///< inplace scan without allocation
1220 /// Initializes %HP singleton
1222 The constructor initializes Hazard Pointer SMR singleton with passed parameters.
1223 If the instance does not yet exist then the function creates the instance.
1224 Otherwise it does nothing.
1226 The Michael's %HP reclamation schema depends of three parameters:
1227 - \p nHazardPtrCount - hazard pointer count per thread. Usually it is small number (up to 10) depending from
1228 the data structure algorithms. If \p nHazardPtrCount = 0, the defaul value 8 is used
1229 - \p nMaxThreadCount - max count of thread with using Hazard Pointer GC in your application. Default is 100.
1230 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
1231 <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
1234 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
1235 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
1236 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
1237 scan_type nScanType = scan_type::inplace ///< Scan type (see \p scan_type enum)
1243 nMaxRetiredPtrCount,
1244 static_cast<hp::scan_type>(nScanType)
1248 /// Terminates GC singleton
1250 The destructor destroys %HP global object. After calling of this function you may \b NOT
1251 use CDS data structures based on \p %cds::gc::HP.
1252 Usually, %HP object is destroyed at the end of your \p main().
1256 hp::smr::destruct( true );
1259 /// Checks that required hazard pointer count \p nCountNeeded is less or equal then max hazard pointer count
1261 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
1263 static void check_available_guards( size_t nCountNeeded )
1265 hp::smr::check_hazard_ptr_count( nCountNeeded );
1268 /// Set memory management functions
1270 @note This function may be called <b>BEFORE</b> creating an instance
1271 of Hazard Pointer SMR
1273 SMR object allocates some memory for thread-specific data and for
1274 creating SMR object.
1275 By default, a standard \p new and \p delete operators are used for this.
1277 static void set_memory_allocator(
1278 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1279 void( *free_func )( void * p ) ///< \p free() function
1282 hp::smr::set_memory_allocator( alloc_func, free_func );
1285 /// Returns max Hazard Pointer count
1286 static size_t max_hazard_count()
1288 return hp::smr::instance().get_hazard_ptr_count();
1291 /// Returns max count of thread
1292 static size_t max_thread_count()
1294 return hp::smr::instance().get_max_thread_count();
1297 /// Returns capacity of retired pointer array
1298 static size_t retired_array_capacity()
1300 return hp::smr::instance().get_max_retired_ptr_count();
1303 /// Retire pointer \p p with function \p func
1305 The function places pointer \p p into the array of pointers ready for removing
1306 (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1307 \p func is a disposer: when \p p can be safely removed, \p func is called.
1309 template <typename T>
1310 static void retire( T * p, void( *func )( T * ))
1312 hp::thread_data* rec = hp::smr::tls();
1313 if ( !rec->retired_.push( hp::retired_ptr( p, func )))
1314 hp::smr::instance().scan( rec );
1317 /// Retire pointer \p p with functor of type \p Disposer
1319 The function places pointer \p p into the array of pointers ready for removing
1320 (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1322 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1324 template <typename T>
1326 void operator()( T * p ) ; // disposing operator
1329 Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
1330 - it should be a stateless functor
1331 - it should be default-constructible
1332 - the result of functor call with argument \p p should not depend on where the functor will be called.
1335 Operator \p delete functor:
1337 template <typename T>
1339 void operator ()( T * p ) {
1344 // How to call HP::retire method
1347 // ... use p in lock-free manner
1349 cds::gc::HP::retire<disposer>( p ) ; // place p to retired pointer array of HP GC
1352 Functor based on \p std::allocator :
1354 template <typename Alloc = std::allocator<int> >
1356 template <typename T>
1357 void operator()( T * p ) {
1358 typedef typename Alloc::template rebind<T>::other alloc_t;
1361 a.deallocate( p, 1 );
1366 template <class Disposer, typename T>
1367 static void retire( T * p )
1369 if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1373 /// Get current scan strategy
1374 static scan_type getScanType()
1376 return static_cast<scan_type>( hp::smr::instance().get_scan_type());
1379 /// Checks if Hazard Pointer GC is constructed and may be used
1380 static bool isUsed()
1382 return hp::smr::isUsed();
1385 /// Forces SMR call for current thread
1387 Usually, this function should not be called directly.
1391 hp::smr::instance().scan( hp::smr::tls());
1394 /// Synonym for \p scan()
1395 static void force_dispose()
1400 /// Returns internal statistics
1402 The function clears \p st before gathering statistics.
1404 @note Internal statistics is available only if you compile
1405 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
1407 static void statistics( stat& st )
1409 hp::smr::instance().statistics( st );
1412 /// Returns post-mortem statistics
1414 Post-mortem statistics is gathered in the \p %HP object destructor
1415 and can be accessed after the global \p %HP object has been destroyed.
1417 @note Internal statistics is available only if you compile
1418 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
1426 // Initialize HP SMR
1429 // deal with HP-based data structured
1433 // HP object destroyed
1434 // Get total post-mortem statistics
1435 cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics();
1437 printf( "HP statistics:\n"
1438 "\tthread count = %llu\n"
1439 "\tguard allocated = %llu\n"
1440 "\tguard freed = %llu\n"
1441 "\tretired data count = %llu\n"
1442 "\tfree data count = %llu\n"
1443 "\tscan() call count = %llu\n"
1444 "\thelp_scan() call count = %llu\n",
1445 st.thread_rec_count,
1446 st.guard_allocated, st.guard_freed,
1447 st.retired_count, st.free_count,
1448 st.scan_count, st.help_scan_count
1455 static stat const& postmortem_statistics();
1458 }} // namespace cds::gc
1460 #endif // #ifndef CDSLIB_GC_HP_SMR_H