2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_HP_SMR_H
32 #define CDSLIB_GC_HP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/throw_exception.h>
37 #include <cds/details/static_functor.h>
38 #include <cds/details/marked_ptr.h>
39 #include <cds/user_setup/cache_line.h>
41 namespace cds { namespace gc {
43 using namespace cds::gc::hp::common;
/// Exception "Not enough Hazard Pointer"
/**
    Thrown when a thread's hazard pointer free list is exhausted.
    (The misspelled name is kept for API compatibility.)
*/
class not_enought_hazard_ptr: public std::length_error
{
public:
    //@cond
    not_enought_hazard_ptr()
        : std::length_error( "Not enough Hazard Pointer" )
    {}
    //@endcond
};

/// Exception "Hazard Pointer SMR is not initialized"
/**
    Thrown when the global SMR singleton is accessed before construct() was called.
*/
class not_initialized: public std::runtime_error
{
public:
    //@cond
    not_initialized()
        : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" )
    {}
    //@endcond
};
65 /// Per-thread hazard pointer storage
66 class thread_hp_storage {
68 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
72 # ifdef CDS_ENABLE_HPSTAT
73 , alloc_guard_count_(0)
74 , free_guard_count_(0)
78 new( arr ) guard[nSize];
80 for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr )
85 thread_hp_storage() = delete;
86 thread_hp_storage( thread_hp_storage const& ) = delete;
87 thread_hp_storage( thread_hp_storage&& ) = delete;
89 size_t capacity() const CDS_NOEXCEPT
94 bool full() const CDS_NOEXCEPT
96 return free_head_ == nullptr;
101 # ifdef CDS_DISABLE_SMR_EXCEPTION
105 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
107 guard* g = free_head_;
108 free_head_ = g->next_;
109 CDS_HPSTAT( ++alloc_guard_count_ );
113 void free( guard* g ) CDS_NOEXCEPT
115 assert( g >= array_ && g < array_ + capacity() );
119 g->next_ = free_head_;
121 CDS_HPSTAT( ++free_guard_count_ );
125 template< size_t Capacity>
126 size_t alloc( guard_array<Capacity>& arr )
129 guard* g = free_head_;
130 for ( i = 0; i < Capacity && g; ++i ) {
135 # ifdef CDS_DISABLE_SMR_EXCEPTION
136 assert( i == Capacity );
139 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
142 CDS_HPSTAT( alloc_guard_count_ += Capacity );
146 template <size_t Capacity>
147 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
149 guard* gList = free_head_;
150 for ( size_t i = 0; i < Capacity; ++i ) {
156 CDS_HPSTAT( ++free_guard_count_ );
162 // cppcheck-suppress functionConst
165 for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur )
169 guard& operator[]( size_t idx )
171 assert( idx < capacity() );
176 static size_t calc_array_size( size_t capacity )
178 return sizeof( guard ) * capacity;
182 guard* free_head_; ///< Head of free guard list
183 guard* const array_; ///< HP array
184 size_t const capacity_; ///< HP array capacity
185 # ifdef CDS_ENABLE_HPSTAT
187 size_t alloc_guard_count_;
188 size_t free_guard_count_;
192 /// Per-thread retired array
196 retired_array( retired_ptr* arr, size_t capacity ) CDS_NOEXCEPT
198 , last_( arr + capacity )
200 # ifdef CDS_ENABLE_HPSTAT
201 , retire_call_count_(0)
205 retired_array() = delete;
206 retired_array( retired_array const& ) = delete;
207 retired_array( retired_array&& ) = delete;
209 size_t capacity() const CDS_NOEXCEPT
211 return last_ - retired_;
214 size_t size() const CDS_NOEXCEPT
216 return current_ - retired_;
219 bool push( retired_ptr&& p ) CDS_NOEXCEPT
222 CDS_HPSTAT( ++retire_call_count_ );
223 return ++current_ < last_;
226 retired_ptr* first() const CDS_NOEXCEPT
231 retired_ptr* last() const CDS_NOEXCEPT
236 void reset( size_t nSize ) CDS_NOEXCEPT
238 current_ = first() + nSize;
241 bool full() const CDS_NOEXCEPT
243 return current_ == last_;
246 static size_t calc_array_size( size_t capacity )
248 return sizeof( retired_ptr ) * capacity;
252 retired_ptr* current_;
253 retired_ptr* const last_;
254 retired_ptr* const retired_;
255 # ifdef CDS_ENABLE_HPSTAT
257 size_t retire_call_count_;
262 /// Internal statistics
264 size_t guard_allocated; ///< Count of allocated HP guards
265 size_t guard_freed; ///< Count of freed HP guards
266 size_t retired_count; ///< Count of retired pointers
267 size_t free_count; ///< Count of free pointers
268 size_t scan_count; ///< Count of \p scan() call
269 size_t help_scan_count; ///< Count of \p help_scan() call
271 size_t thread_rec_count; ///< Count of thread records
286 thread_rec_count = 0;
292 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
293 retired_array retired_; ///< Retired data private to the thread
295 stat stat_; ///< Internal statistics for the thread
297 char pad1_[cds::c_nCacheLineSize];
298 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
299 char pad2_[cds::c_nCacheLineSize];
301 // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
302 // cppcheck-suppress uninitMemberVar
303 thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
304 : hazards_( guards, guard_count )
305 , retired_( retired_arr, retired_capacity )
309 thread_data() = delete;
310 thread_data( thread_data const& ) = delete;
311 thread_data( thread_data&& ) = delete;
315 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
/// smr::scan() strategy
enum scan_type {
    classic,    ///< classic scan as described in Michael's works (see smr::classic_scan())
    inplace     ///< inplace scan without allocation (see smr::inplace_scan())
};
325 // Hazard Pointer SMR (Safe Memory Reclamation)
328 struct thread_record;
331 /// Returns the instance of Hazard Pointer \ref smr
332 static smr& instance()
334 # ifdef CDS_DISABLE_SMR_EXCEPTION
335 assert( instance_ != nullptr );
338 CDS_THROW_EXCEPTION( not_initialized());
343 /// Creates Hazard Pointer SMR singleton
345 Hazard Pointer SMR is a singleton. If HP instance is not initialized then the function creates the instance.
346 Otherwise it does nothing.
348 The Michael's HP reclamation schema depends on three parameters:
349 - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from
350 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
351 the function uses maximum of HP count for CDS library
352 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
353 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
354 <tt> nHazardPtrCount * nMaxThreadCount </tt>
355 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
357 static CDS_EXPORT_API void construct(
358 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
359 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
360 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
361 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
365 // for back-copatibility
366 static void Construct(
367 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
368 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
369 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
370 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
373 construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
378 /// Destroys global instance of \ref smr
380 The parameter \p bDetachAll should be used carefully: if its value is \p true,
381 then the object destroyed automatically detaches all attached threads. This feature
382 can be useful when you have no control over the thread termination, for example,
383 when \p libcds is injected into existing external thread.
385 static CDS_EXPORT_API void destruct(
386 bool bDetachAll = false ///< Detach all threads
390 // for back-copatibility
391 static void Destruct(
392 bool bDetachAll = false ///< Detach all threads
395 destruct( bDetachAll );
399 /// Checks if global SMR object is constructed and may be used
400 static bool isUsed() CDS_NOEXCEPT
402 return instance_ != nullptr;
405 /// Set memory management functions
407 @note This function may be called <b>BEFORE</b> creating an instance
408 of Hazard Pointer SMR
410 SMR object allocates some memory for thread-specific data and for
412 By default, a standard \p new and \p delete operators are used for this.
414 static CDS_EXPORT_API void set_memory_allocator(
415 void* ( *alloc_func )( size_t size ),
416 void (*free_func )( void * p )
419 /// Returns max Hazard Pointer count per thread
420 size_t get_hazard_ptr_count() const CDS_NOEXCEPT
422 return hazard_ptr_count_;
425 /// Returns max thread count
426 size_t get_max_thread_count() const CDS_NOEXCEPT
428 return max_thread_count_;
431 /// Returns max size of retired objects array
432 size_t get_max_retired_ptr_count() const CDS_NOEXCEPT
434 return max_retired_ptr_count_;
437 /// Get current scan strategy
438 scan_type get_scan_type() const
443 /// Checks that required hazard pointer count \p nRequiredCount is less or equal then max hazard pointer count
445 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
447 static void check_hazard_ptr_count( size_t nRequiredCount )
449 if ( instance().get_hazard_ptr_count() < nRequiredCount ) {
450 # ifdef CDS_DISABLE_SMR_EXCEPTION
451 assert( false ); // not enough hazard ptr
453 CDS_THROW_EXCEPTION( not_enought_hazard_ptr() );
458 /// Returns thread-local data for the current thread
459 static CDS_EXPORT_API thread_data* tls();
461 static CDS_EXPORT_API void attach_thread();
462 static CDS_EXPORT_API void detach_thread();
464 /// Get internal statistics
465 void statistics( stat& st );
467 public: // for internal use only
468 /// The main garbage collecting function
470 This function is called internally when upper bound of thread's list of reclaimed pointers
473 There are the following scan algorithm:
474 - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
475 - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
477 Use \p set_scan_type() member function to setup appropriate scan algorithm.
479 void scan( thread_data* pRec )
481 ( this->*scan_func_ )( pRec );
484 /// Helper scan routine
486 The function guarantees that every node that is eligible for reuse is eventually freed, barring
487 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
488 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
489 to thread's list of reclaimed pointers.
491 The function is called internally by \p scan().
493 CDS_EXPORT_API void help_scan( thread_data* pThis );
497 size_t nHazardPtrCount, ///< Hazard pointer count per thread
498 size_t nMaxThreadCount, ///< Max count of simultaneous working thread in your application
499 size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread
500 scan_type nScanType ///< Scan type (see \ref scan_type enum)
503 CDS_EXPORT_API ~smr();
505 CDS_EXPORT_API void detach_all_thread();
507 /// Classic scan algorithm
508 /** @anchor hzp_gc_classic_scan
509 Classical scan algorithm as described in Michael's paper.
511 A scan includes four stages. The first stage involves scanning the array HP for non-null values.
512 Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer.
513 Only stage 1 accesses shared variables. The following stages operate only on private variables.
515 The second stage of a scan involves sorting local list of protected pointers to allow
516 binary search in the third stage.
518 The third stage of a scan involves checking each reclaimed node
519 against the pointers in local list of protected pointers. If the binary search yields
520 no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in thread's list
521 of reclaimed pointers.
523 The forth stage prepares new thread's private list of reclaimed pointers
524 that could not be freed during the current scan, where they remain until the next scan.
526 This algorithm allocates memory for internal HP array.
528 This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers
531 CDS_EXPORT_API void classic_scan( thread_data* pRec );
533 /// In-place scan algorithm
534 /** @anchor hzp_gc_inplace_scan
535 Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory.
536 All operations are performed in-place.
538 CDS_EXPORT_API void inplace_scan( thread_data* pRec );
542 CDS_EXPORT_API thread_record* create_thread_data();
543 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
545 /// Allocates Hazard Pointer SMR thread private data
546 CDS_EXPORT_API thread_record* alloc_thread_data();
548 /// Free HP SMR thread-private data
549 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
554 static CDS_EXPORT_API smr* instance_;
556 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
558 size_t const hazard_ptr_count_; ///< max count of thread's hazard pointer
559 size_t const max_thread_count_; ///< max count of thread
560 size_t const max_retired_ptr_count_; ///< max count of retired ptr per thread
561 scan_type const scan_type_; ///< scan type (see \ref scan_type enum)
562 void ( smr::*scan_func_ )( thread_data* pRec );
565 // for backward compatibility
566 typedef smr GarbageCollector;
571 /// @defgroup cds_garbage_collector Garbage collectors
573 /// Hazard Pointer SMR (Safe Memory Reclamation)
574 /** @ingroup cds_garbage_collector
576 Implementation of classic Hazard Pointer garbage collector.
579 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
580 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
581 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
583 Hazard Pointer garbage collector is a singleton. The main user-level part of Hazard Pointer schema is
584 \p %cds::gc::HP class and its nested classes. Before using any HP-related class you must initialize HP
585 by constructing \p %cds::gc::HP object in the beginning of your \p main().
586 See \ref cds_how_to_use "How to use" section for details how to apply SMR schema.
591 /// Native guarded pointer type
592 typedef hp::hazard_ptr guarded_pointer;
595 template <typename T> using atomic_ref = atomics::atomic<T *>;
597 /// Atomic marked pointer
598 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
601 template <typename T> using atomic_type = atomics::atomic<T>;
603 /// Exception "Not enough Hazard Pointer"
604 typedef hp::not_enought_hazard_ptr not_enought_hazard_ptr_exception;
606 /// Internal statistics
607 typedef hp::stat stat;
609 /// Hazard Pointer guard
611 A guard is a hazard pointer.
612 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
614 \p %Guard object is movable but not copyable.
616 The guard object can be in two states:
617 - unlinked - the guard is not linked with any internal hazard pointer.
618 In this state no operation except \p link() and move assignment is supported.
619 - linked (default) - the guard allocates an internal hazard pointer and completely operable.
621 Due to performance reason the implementation does not check state of the guard in runtime.
623 @warning Move assignment transfers the guard in unlinked state, use with care.
628 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
630 @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
633 : guard_( hp::smr::tls()->hazards_.alloc() )
636 /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
637 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
641 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
642 Guard( Guard&& src ) CDS_NOEXCEPT
643 : guard_( src.guard_ )
645 src.guard_ = nullptr;
648 /// Move assignment: the internal guards are swapped between \p src and \p this
650 @warning \p src will become in unlinked state if \p this was unlinked on entry.
652 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
654 std::swap( guard_, src.guard_ );
658 /// Copy ctor is prohibited - the guard is not copyable
659 Guard( Guard const& ) = delete;
661 /// Copy assignment is prohibited
662 Guard& operator=( Guard const& ) = delete;
664 /// Frees the internal hazard pointer if the guard is in linked state
670 /// Checks if the guard object linked with any internal hazard pointer
671 bool is_linked() const
673 return guard_ != nullptr;
676 /// Links the guard with internal hazard pointer if the guard is in unlinked state
678 @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer array is exhausted.
683 guard_ = hp::smr::tls()->hazards_.alloc();
686 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
690 hp::smr::tls()->hazards_.free( guard_ );
695 /// Protects a pointer of type \p atomic<T*>
697 Return the value of \p toGuard
699 The function tries to load \p toGuard and to store it
700 to the HP slot repeatedly until the guard's value equals \p toGuard
702 @warning The guad object should be in linked state, otherwise the result is undefined
704 template <typename T>
705 T protect( atomics::atomic<T> const& toGuard )
707 assert( guard_ != nullptr );
709 T pCur = toGuard.load(atomics::memory_order_acquire);
712 pRet = assign( pCur );
713 pCur = toGuard.load(atomics::memory_order_acquire);
714 } while ( pRet != pCur );
718 /// Protects a converted pointer of type \p atomic<T*>
720 Return the value of \p toGuard
722 The function tries to load \p toGuard and to store result of \p f functor
723 to the HP slot repeatedly until the guard's value equals \p toGuard.
725 The function is useful for intrusive containers when \p toGuard is a node pointer
726 that should be converted to a pointer to the value before protecting.
727 The parameter \p f of type Func is a functor that makes this conversion:
730 value_type * operator()( T * p );
733 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
735 @warning The guad object should be in linked state, otherwise the result is undefined
737 template <typename T, class Func>
738 T protect( atomics::atomic<T> const& toGuard, Func f )
740 assert( guard_ != nullptr );
742 T pCur = toGuard.load(atomics::memory_order_acquire);
747 pCur = toGuard.load(atomics::memory_order_acquire);
748 } while ( pRet != pCur );
752 /// Store \p p to the guard
754 The function equals to a simple assignment the value \p p to guard, no loop is performed.
755 Can be used for a pointer that cannot be changed concurrently or if the pointer is already
756 guarded by another guard.
758 @warning The guad object should be in linked state, otherwise the result is undefined
760 template <typename T>
763 assert( guard_ != nullptr );
766 hp::smr::tls()->sync();
771 std::nullptr_t assign( std::nullptr_t )
773 assert( guard_ != nullptr );
780 /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
781 void copy( Guard const& src )
783 assign( src.get_native());
786 /// Store marked pointer \p p to the guard
788 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
789 Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer
790 is already guarded by another guard.
792 @warning The guard object should be in linked state, otherwise the result is undefined
794 template <typename T, int BITMASK>
795 T * assign( cds::details::marked_ptr<T, BITMASK> p )
797 return assign( p.ptr());
800 /// Clear value of the guard (valid only in linked state)
806 /// Get the value currently protected (valid only in linked state)
807 template <typename T>
810 assert( guard_ != nullptr );
811 return guard_->get_as<T>();
814 /// Get native hazard pointer stored (valid only in linked state)
815 guarded_pointer get_native() const
817 assert( guard_ != nullptr );
818 return guard_->get();
824 hp::guard* g = guard_;
829 hp::guard*& guard_ref()
841 /// Array of Hazard Pointer guards
843 The class is intended for allocating an array of hazard pointer guards.
844 Template parameter \p Count defines the size of the array.
846 template <size_t Count>
850 /// Rebind array for other size \p Count2
851 template <size_t Count2>
853 typedef GuardArray<Count2> other; ///< rebinding result
857 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
860 /// Default ctor allocates \p Count hazard pointers
863 hp::smr::tls()->hazards_.alloc( guards_ );
866 /// Move ctor is prohibited
867 GuardArray( GuardArray&& ) = delete;
869 /// Move assignment is prohibited
870 GuardArray& operator=( GuardArray&& ) = delete;
872 /// Copy ctor is prohibited
873 GuardArray( GuardArray const& ) = delete;
875 /// Copy assignment is prohibited
876 GuardArray& operator=( GuardArray const& ) = delete;
878 /// Frees allocated hazard pointers
881 hp::smr::tls()->hazards_.free( guards_ );
884 /// Protects a pointer of type \p atomic<T*>
886 Return the value of \p toGuard
888 The function tries to load \p toGuard and to store it
889 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
891 template <typename T>
892 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
894 assert( nIndex < capacity());
898 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
899 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
904 /// Protects a pointer of type \p atomic<T*>
906 Return the value of \p toGuard
908 The function tries to load \p toGuard and to store it
909 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
911 The function is useful for intrusive containers when \p toGuard is a node pointer
912 that should be converted to a pointer to the value type before guarding.
913 The parameter \p f of type Func is a functor that makes this conversion:
916 value_type * operator()( T * p );
919 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
921 template <typename T, class Func>
922 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
924 assert( nIndex < capacity());
928 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
929 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
934 /// Store \p to the slot \p nIndex
936 The function equals to a simple assignment, no loop is performed.
938 template <typename T>
939 T * assign( size_t nIndex, T * p )
941 assert( nIndex < capacity() );
943 guards_.set( nIndex, p );
944 hp::smr::tls()->sync();
948 /// Store marked pointer \p p to the guard
950 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
951 Can be used for a marked pointer that cannot be changed concurrently.
953 template <typename T, int BITMASK>
954 T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
956 return assign( nIndex, p.ptr());
959 /// Copy guarded value from \p src guard to slot at index \p nIndex
960 void copy( size_t nIndex, Guard const& src )
962 assign( nIndex, src.get_native());
965 /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex
966 void copy( size_t nDestIndex, size_t nSrcIndex )
968 assign( nDestIndex, get_native( nSrcIndex ));
971 /// Clear value of the slot \p nIndex
972 void clear( size_t nIndex )
974 guards_.clear( nIndex );
977 /// Get current value of slot \p nIndex
978 template <typename T>
979 T * get( size_t nIndex ) const
981 assert( nIndex < capacity() );
982 return guards_[nIndex]->template get_as<T>();
985 /// Get native hazard pointer stored
986 guarded_pointer get_native( size_t nIndex ) const
988 assert( nIndex < capacity());
989 return guards_[nIndex]->get();
993 hp::guard* release( size_t nIndex ) CDS_NOEXCEPT
995 return guards_.release( nIndex );
999 /// Capacity of the guard array
1000 static CDS_CONSTEXPR size_t capacity()
1007 hp::guard_array<c_nCapacity> guards_;
1013 A guarded pointer is a pair of a pointer and GC's guard.
1014 Usually, it is used for returning a pointer to an element of a lock-free container.
1015 The guard prevents the pointer to be early disposed (freed) by SMR.
1016 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1019 - \p GuardedType - a type which the guard stores
1020 - \p ValueType - a value type
1021 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1023 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1024 In such case the \p %guarded_ptr is:
1026 typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr;
1029 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1037 struct value_accessor {
1038 std::string* operator()( foo* pFoo ) const
1040 return &(pFoo->value);
1045 typedef cds::gc::HP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1048 You don't need use this class directly.
1049 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1051 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1055 struct trivial_cast {
1056 ValueType * operator()( GuardedType * p ) const
1062 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1066 typedef GuardedType guarded_type; ///< Guarded type
1067 typedef ValueType value_type; ///< Value type
1069 /// Functor for casting \p guarded_type to \p value_type
1070 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1073 /// Creates empty guarded pointer
1074 guarded_ptr() CDS_NOEXCEPT
1079 explicit guarded_ptr( hp::guard* g ) CDS_NOEXCEPT
1083 /// Initializes guarded pointer with \p p
1084 explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
1089 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1095 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1096 : guard_( gp.guard_ )
1098 gp.guard_ = nullptr;
1102 template <typename GT, typename VT, typename C>
1103 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1104 : guard_( gp.guard_ )
1106 gp.guard_ = nullptr;
1109 /// Ctor from \p Guard
1110 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1111 : guard_( g.release())
1114 /// The guarded pointer is not copy-constructible
1115 guarded_ptr( guarded_ptr const& gp ) = delete;
1117 /// Clears the guarded pointer
1119 \ref release() is called if guarded pointer is not \ref empty()
1121 ~guarded_ptr() CDS_NOEXCEPT
1126 /// Move-assignment operator
1127 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1129 std::swap( guard_, gp.guard_ );
1133 /// Move-assignment from \p Guard
1134 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1136 std::swap( guard_, g.guard_ref());
1140 /// The guarded pointer is not copy-assignable
1141 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1143 /// Returns a pointer to guarded value
1144 value_type * operator ->() const CDS_NOEXCEPT
1147 return value_cast()( guard_->get_as<guarded_type>());
1150 /// Returns a reference to guarded value
1151 value_type& operator *() CDS_NOEXCEPT
1154 return *value_cast()( guard_->get_as<guarded_type>());
1157 /// Returns const reference to guarded value
1158 value_type const& operator *() const CDS_NOEXCEPT
1161 return *value_cast()( guard_->get_as<guarded_type>());
1164 /// Checks if the guarded pointer is \p nullptr
1165 bool empty() const CDS_NOEXCEPT
1167 return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1170 /// \p bool operator returns <tt>!empty()</tt>
1171 explicit operator bool() const CDS_NOEXCEPT
1176 /// Clears guarded pointer
1178 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1179 Dereferncing the guarded pointer after \p release() is dangerous.
1181 void release() CDS_NOEXCEPT
1187 // For internal use only!!!
1188 void reset(guarded_type * p) CDS_NOEXCEPT
1201 guard_ = hp::smr::tls()->hazards_.alloc();
1207 hp::smr::tls()->hazards_.free( guard_ );
1221 enum class scan_type {
1222 classic = hp::classic, ///< classic scan as described in Michael's papers
1223 inplace = hp::inplace ///< inplace scan without allocation
1226 /// Initializes %HP singleton
1228 The constructor initializes Hazard Pointer SMR singleton with passed parameters.
1229 If the instance does not yet exist then the function creates the instance.
1230 Otherwise it does nothing.
1232 The Michael's %HP reclamation schema depends of three parameters:
1233 - \p nHazardPtrCount - hazard pointer count per thread. Usually it is small number (up to 10) depending from
1234 the data structure algorithms. If \p nHazardPtrCount = 0, the defaul value 8 is used
1235 - \p nMaxThreadCount - max count of thread with using Hazard Pointer GC in your application. Default is 100.
1236 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
1237 <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
1240 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
1241 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
1242 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
1243 scan_type nScanType = scan_type::inplace ///< Scan type (see \p scan_type enum)
1249 nMaxRetiredPtrCount,
1250 static_cast<hp::scan_type>(nScanType)
1254 /// Terminates GC singleton
1256 The destructor destroys %HP global object. After calling of this function you may \b NOT
1257 use CDS data structures based on \p %cds::gc::HP.
1258 Usually, %HP object is destroyed at the end of your \p main().
1262 hp::smr::destruct( true );
1265 /// Checks that required hazard pointer count \p nCountNeeded is less or equal then max hazard pointer count
1267 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
1269 static void check_available_guards( size_t nCountNeeded )
1271 hp::smr::check_hazard_ptr_count( nCountNeeded );
1274 /// Set memory management functions
1276 @note This function may be called <b>BEFORE</b> creating an instance
1277 of Hazard Pointer SMR
1279 SMR object allocates some memory for thread-specific data and for
1280 creating SMR object.
1281 By default, a standard \p new and \p delete operators are used for this.
1283 static void set_memory_allocator(
1284 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1285 void( *free_func )( void * p ) ///< \p free() function
1288 hp::smr::set_memory_allocator( alloc_func, free_func );
1291 /// Returns max Hazard Pointer count
1292 static size_t max_hazard_count()
1294 return hp::smr::instance().get_hazard_ptr_count();
1297 /// Returns max count of thread
1298 static size_t max_thread_count()
1300 return hp::smr::instance().get_max_thread_count();
1303 /// Returns capacity of retired pointer array
1304 static size_t retired_array_capacity()
1306 return hp::smr::instance().get_max_retired_ptr_count();
1309 /// Retire pointer \p p with function \p func
1311 The function places pointer \p p to array of pointers ready for removing.
1312 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1313 \p func is a disposer: when \p p can be safely removed, \p func is called.
1315 template <typename T>
1316 static void retire( T * p, void( *func )( T * ))
1318 hp::thread_data* rec = hp::smr::tls();
1319 if ( !rec->retired_.push( hp::retired_ptr( p, func )))
1320 hp::smr::instance().scan( rec );
1323 /// Retire pointer \p p with functor of type \p Disposer
1325 The function places pointer \p p to array of pointers ready for removing.
1326 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1328 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1330 template <typename T>
1332 void operator()( T * p ) ; // disposing operator
1335 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1336 - it should be stateless functor
1337 - it should be default-constructible
1338 - the result of functor call with argument \p p should not depend on where the functor will be called.
1341 Operator \p delete functor:
1343 template <typename T>
1345 void operator ()( T * p ) {
1350 // How to call HP::retire method
1353 // ... use p in lock-free manner
1355 cds::gc::HP::retire<disposer>( p ) ; // place p to retired pointer array of HP GC
1358 Functor based on \p std::allocator :
1360 template <typename Alloc = std::allocator<int> >
1362 template <typename T>
1363 void operator()( T * p ) {
1364 typedef typename Alloc::templare rebind<T>::other alloc_t;
1367 a.deallocate( p, 1 );
1372 template <class Disposer, typename T>
1373 static void retire( T * p )
1375 if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1379 /// Get current scan strategy
1380 static scan_type getScanType()
1382 return static_cast<scan_type>( hp::smr::instance().get_scan_type());
1385 /// Checks if Hazard Pointer GC is constructed and may be used
1386 static bool isUsed()
1388 return hp::smr::isUsed();
1391 /// Forces SMR call for current thread
1393 Usually, this function should not be called directly.
1397 hp::smr::instance().scan( hp::smr::tls());
1400 /// Synonym for \p scan()
1401 static void force_dispose()
1406 /// Returns internal statistics
1408 The function clears \p st before gathering statistics.
1410 @note Internal statistics is available only if you compile
1411 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
1413 static void statistics( stat& st )
1415 hp::smr::instance().statistics( st );
1418 /// Returns post-mortem statistics
1420 Post-mortem statistics is gathered in the \p %HP object destructor
1421 and can be accessible after destructing the global \p %HP object.
1423 @note Internal statistics is available only if you compile
1424 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
1432 // Initialize HP SMR
1435 // deal with HP-based data structured
1439 // HP object destroyed
1440 // Get total post-mortem statistics
1441 cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics();
1443 printf( "HP statistics:\n"
1444 "\tthread count = %llu\n"
1445 "\tguard allocated = %llu\n"
1446 "\tguard freed = %llu\n"
1447 "\tretired data count = %llu\n"
1448 "\tfree data count = %llu\n"
1449 "\tscan() call count = %llu\n"
1450 "\thelp_scan() call count = %llu\n",
1451 st.thread_rec_count,
1452 st.guard_allocated, st.guard_freed,
1453 st.retired_count, st.free_count,
1454 st.scan_count, st.help_scan_count
1461 static stat const& postmortem_statistics();
1464 }} // namespace cds::gc
1466 #endif // #ifndef CDSLIB_GC_HP_SMR_H