2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_HP_SMR_H
32 #define CDSLIB_GC_HP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/lib.h>
37 #include <cds/threading/model.h>
38 #include <cds/details/throw_exception.h>
39 #include <cds/details/static_functor.h>
40 #include <cds/details/marked_ptr.h>
41 #include <cds/user_setup/cache_line.h>
44 @page cds_garbage_collectors_comparison Hazard Pointer SMR implementations
45 @ingroup cds_garbage_collector
51 <th>%cds::gc::DHP</th>
54 <td>Max number of guarded (hazard) pointers per thread</td>
55 <td>limited (specified at construction time)</td>
56 <td>unlimited (dynamically allocated when needed)</td>
59 <td>Max number of retired pointers<sup>1</sup></td>
60 <td>bounded, specified at construction time</td>
61 <td>bounded, adaptive, depends on current thread count and number of hazard pointers for each thread</td>
65 <td>bounded, upper bound is specified at construction time</td>
70 <sup>1</sup>Unbounded count of retired pointers means a possibility of memory exhaustion.
74 /// @defgroup cds_garbage_collector Garbage collectors
77 /// Different safe memory reclamation schemas (garbage collectors)
78 /** @ingroup cds_garbage_collector
80 This namespace specifies different safe memory reclamation (SMR) algorithms.
81 See \ref cds_garbage_collector "Garbage collectors"
89 namespace cds { namespace gc {
90 /// Hazard pointer implementation details
92 using namespace cds::gc::hp::common;
94 /// Exception "Not enough Hazard Pointer"
95 class not_enought_hazard_ptr: public std::length_error
99 not_enought_hazard_ptr()
100 : std::length_error( "Not enough Hazard Pointer" )
105 /// Exception "Hazard Pointer SMR is not initialized"
106 class not_initialized: public std::runtime_error
111 : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" )
117 /// Per-thread hazard pointer storage
118 class thread_hp_storage {
120 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
124 # ifdef CDS_ENABLE_HPSTAT
125 , alloc_guard_count_(0)
126 , free_guard_count_(0)
130 new( arr ) guard[nSize];
132 for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr )
133 arr->next_ = arr + 1;
134 arr->next_ = nullptr;
137 thread_hp_storage() = delete;
138 thread_hp_storage( thread_hp_storage const& ) = delete;
139 thread_hp_storage( thread_hp_storage&& ) = delete;
141 size_t capacity() const CDS_NOEXCEPT
146 bool full() const CDS_NOEXCEPT
148 return free_head_ == nullptr;
153 # ifdef CDS_DISABLE_SMR_EXCEPTION
157 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
159 guard* g = free_head_;
160 free_head_ = g->next_;
161 CDS_HPSTAT( ++alloc_guard_count_ );
165 void free( guard* g ) CDS_NOEXCEPT
167 assert( g >= array_ && g < array_ + capacity() );
171 g->next_ = free_head_;
173 CDS_HPSTAT( ++free_guard_count_ );
177 template< size_t Capacity>
178 size_t alloc( guard_array<Capacity>& arr )
181 guard* g = free_head_;
182 for ( i = 0; i < Capacity && g; ++i ) {
187 # ifdef CDS_DISABLE_SMR_EXCEPTION
188 assert( i == Capacity );
191 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
194 CDS_HPSTAT( alloc_guard_count_ += Capacity );
198 template <size_t Capacity>
199 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
201 guard* gList = free_head_;
202 for ( size_t i = 0; i < Capacity; ++i ) {
208 CDS_HPSTAT( ++free_guard_count_ );
214 // cppcheck-suppress functionConst
217 for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur )
221 guard& operator[]( size_t idx )
223 assert( idx < capacity() );
228 static size_t calc_array_size( size_t capacity )
230 return sizeof( guard ) * capacity;
234 guard* free_head_; ///< Head of free guard list
235 guard* const array_; ///< HP array
236 size_t const capacity_; ///< HP array capacity
237 # ifdef CDS_ENABLE_HPSTAT
239 size_t alloc_guard_count_;
240 size_t free_guard_count_;
246 /// Per-thread retired array
250 retired_array( retired_ptr* arr, size_t capacity ) CDS_NOEXCEPT
252 , last_( arr + capacity )
254 # ifdef CDS_ENABLE_HPSTAT
255 , retire_call_count_(0)
259 retired_array() = delete;
260 retired_array( retired_array const& ) = delete;
261 retired_array( retired_array&& ) = delete;
263 size_t capacity() const CDS_NOEXCEPT
265 return last_ - retired_;
268 size_t size() const CDS_NOEXCEPT
270 return current_.load(atomics::memory_order_relaxed) - retired_;
273 bool push( retired_ptr&& p ) CDS_NOEXCEPT
275 retired_ptr* cur = current_.load( atomics::memory_order_relaxed );
277 CDS_HPSTAT( ++retire_call_count_ );
278 current_.store( cur + 1, atomics::memory_order_relaxed );
279 return cur + 1 < last_;
282 retired_ptr* first() const CDS_NOEXCEPT
287 retired_ptr* last() const CDS_NOEXCEPT
289 return current_.load( atomics::memory_order_relaxed );
292 void reset( size_t nSize ) CDS_NOEXCEPT
294 current_.store( first() + nSize, atomics::memory_order_relaxed );
297 void interthread_clear()
299 current_.exchange( first(), atomics::memory_order_acq_rel );
302 bool full() const CDS_NOEXCEPT
304 return current_.load( atomics::memory_order_relaxed ) == last_;
307 static size_t calc_array_size( size_t capacity )
309 return sizeof( retired_ptr ) * capacity;
313 atomics::atomic<retired_ptr*> current_;
314 retired_ptr* const last_;
315 retired_ptr* const retired_;
316 # ifdef CDS_ENABLE_HPSTAT
318 size_t retire_call_count_;
323 /// Internal statistics
325 size_t guard_allocated; ///< Count of allocated HP guards
326 size_t guard_freed; ///< Count of freed HP guards
327 size_t retired_count; ///< Count of retired pointers
328 size_t free_count; ///< Count of free pointers
329 size_t scan_count; ///< Count of \p scan() call
330 size_t help_scan_count; ///< Count of \p help_scan() call
332 size_t thread_rec_count; ///< Count of thread records
340 /// Clears all counters
349 thread_rec_count = 0;
356 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
357 retired_array retired_; ///< Retired data private to the thread
359 char pad1_[cds::c_nCacheLineSize];
360 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
361 char pad2_[cds::c_nCacheLineSize];
363 # ifdef CDS_ENABLE_HPSTAT
364 // Internal statistics:
367 size_t help_scan_count_;
370 // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
371 // cppcheck-suppress uninitMemberVar
372 thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
373 : hazards_( guards, guard_count )
374 , retired_( retired_arr, retired_capacity )
376 # ifdef CDS_ENABLE_HPSTAT
379 , help_scan_count_(0)
383 thread_data() = delete;
384 thread_data( thread_data const& ) = delete;
385 thread_data( thread_data&& ) = delete;
389 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
394 /// \p smr::scan() strategy
396 classic, ///< classic scan as described in Michael's works (see smr::classic_scan() )
397 inplace ///< inplace scan without allocation (see smr::inplace_scan() )
401 /// Hazard Pointer SMR (Safe Memory Reclamation)
404 struct thread_record;
407 /// Returns the instance of Hazard Pointer \ref smr
408 static smr& instance()
410 # ifdef CDS_DISABLE_SMR_EXCEPTION
411 assert( instance_ != nullptr );
414 CDS_THROW_EXCEPTION( not_initialized());
419 /// Creates Hazard Pointer SMR singleton
421 Hazard Pointer SMR is a singleton. If HP instance is not initialized then the function creates the instance.
422 Otherwise it does nothing.
424 The Michael's HP reclamation schema depends on three parameters:
425 - \p nHazardPtrCount - HP pointer count per thread. Usually it is a small number (2-4) depending on
426 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
427 the function uses maximum of HP count for CDS library
428 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
429 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
430 <tt> nHazardPtrCount * nMaxThreadCount </tt>
431 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
433 static CDS_EXPORT_API void construct(
434 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
435 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
436 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
437 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
440 // for back-compatibility
441 static void Construct(
442 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
443 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
444 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
445 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
448 construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
451 /// Destroys global instance of \ref smr
453 The parameter \p bDetachAll should be used carefully: if its value is \p true,
454 then the object destroyed automatically detaches all attached threads. This feature
455 can be useful when you have no control over the thread termination, for example,
456 when \p libcds is injected into existing external thread.
458 static CDS_EXPORT_API void destruct(
459 bool bDetachAll = false ///< Detach all threads
462 // for back-compatibility
463 static void Destruct(
464 bool bDetachAll = false ///< Detach all threads
467 destruct( bDetachAll );
470 /// Checks if global SMR object is constructed and may be used
471 static bool isUsed() CDS_NOEXCEPT
473 return instance_ != nullptr;
476 /// Set memory management functions
478 @note This function may be called <b>BEFORE</b> creating an instance
479 of Hazard Pointer SMR
481 SMR object allocates some memory for thread-specific data and for
483 By default, a standard \p new and \p delete operators are used for this.
485 static CDS_EXPORT_API void set_memory_allocator(
486 void* ( *alloc_func )( size_t size ),
487 void (*free_func )( void * p )
490 /// Returns max Hazard Pointer count per thread
491 size_t get_hazard_ptr_count() const CDS_NOEXCEPT
493 return hazard_ptr_count_;
496 /// Returns max thread count
497 size_t get_max_thread_count() const CDS_NOEXCEPT
499 return max_thread_count_;
502 /// Returns max size of retired objects array
503 size_t get_max_retired_ptr_count() const CDS_NOEXCEPT
505 return max_retired_ptr_count_;
508 /// Get current scan strategy
509 scan_type get_scan_type() const
514 /// Checks that required hazard pointer count \p nRequiredCount is less than or equal to max hazard pointer count
516 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
518 static void check_hazard_ptr_count( size_t nRequiredCount )
520 if ( instance().get_hazard_ptr_count() < nRequiredCount ) {
521 # ifdef CDS_DISABLE_SMR_EXCEPTION
522 assert( false ); // not enough hazard ptr
524 CDS_THROW_EXCEPTION( not_enought_hazard_ptr() );
529 /// Returns thread-local data for the current thread
530 static CDS_EXPORT_API thread_data* tls();
532 static CDS_EXPORT_API void attach_thread();
533 static CDS_EXPORT_API void detach_thread();
535 /// Get internal statistics
536 CDS_EXPORT_API void statistics( stat& st );
538 public: // for internal use only
539 /// The main garbage collecting function
541 This function is called internally when upper bound of thread's list of reclaimed pointers
544 There are the following scan algorithms:
545 - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
546 - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
548 Use \p set_scan_type() member function to setup appropriate scan algorithm.
550 void scan( thread_data* pRec )
552 ( this->*scan_func_ )( pRec );
555 /// Helper scan routine
557 The function guarantees that every node that is eligible for reuse is eventually freed, barring
558 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
559 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
560 to thread's list of reclaimed pointers.
562 The function is called internally by \p scan().
564 CDS_EXPORT_API void help_scan( thread_data* pThis );
568 size_t nHazardPtrCount, ///< Hazard pointer count per thread
569 size_t nMaxThreadCount, ///< Max count of simultaneous working thread in your application
570 size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread
571 scan_type nScanType ///< Scan type (see \ref scan_type enum)
574 CDS_EXPORT_API ~smr();
576 CDS_EXPORT_API void detach_all_thread();
578 /// Classic scan algorithm
579 /** @anchor hzp_gc_classic_scan
580 Classical scan algorithm as described in Michael's paper.
582 A scan includes four stages. The first stage involves scanning the array HP for non-null values.
583 Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer.
584 Only stage 1 accesses shared variables. The following stages operate only on private variables.
586 The second stage of a scan involves sorting local list of protected pointers to allow
587 binary search in the third stage.
589 The third stage of a scan involves checking each reclaimed node
590 against the pointers in local list of protected pointers. If the binary search yields
591 no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in thread's list
592 of reclaimed pointers.
594 The fourth stage prepares new thread's private list of reclaimed pointers
595 that could not be freed during the current scan, where they remain until the next scan.
597 This algorithm allocates memory for internal HP array.
599 This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers
602 CDS_EXPORT_API void classic_scan( thread_data* pRec );
604 /// In-place scan algorithm
605 /** @anchor hzp_gc_inplace_scan
606 Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory.
607 All operations are performed in-place.
609 CDS_EXPORT_API void inplace_scan( thread_data* pRec );
612 CDS_EXPORT_API thread_record* create_thread_data();
613 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
615 /// Allocates Hazard Pointer SMR thread private data
616 CDS_EXPORT_API thread_record* alloc_thread_data();
618 /// Free HP SMR thread-private data
619 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
622 static CDS_EXPORT_API smr* instance_;
624 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
626 size_t const hazard_ptr_count_; ///< max count of thread's hazard pointer
627 size_t const max_thread_count_; ///< max count of thread
628 size_t const max_retired_ptr_count_; ///< max count of retired ptr per thread
629 scan_type const scan_type_; ///< scan type (see \ref scan_type enum)
630 void ( smr::*scan_func_ )( thread_data* pRec );
635 // for backward compatibility
636 typedef smr GarbageCollector;
641 /// Hazard Pointer SMR (Safe Memory Reclamation)
642 /** @ingroup cds_garbage_collector
644 Implementation of classic Hazard Pointer SMR
647 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
648 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
649 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
651 Hazard Pointer SMR is a singleton. The main user-level part of Hazard Pointer schema is
652 \p %cds::gc::HP class and its nested classes. Before use any HP-related class you must initialize \p %HP
653 by constructing \p %cds::gc::HP object at the beginning of your \p main().
654 See \ref cds_how_to_use "How to use" section for details how to apply SMR schema.
659 /// Native guarded pointer type
660 typedef hp::hazard_ptr guarded_pointer;
663 template <typename T> using atomic_ref = atomics::atomic<T *>;
665 /// Atomic marked pointer
666 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
669 template <typename T> using atomic_type = atomics::atomic<T>;
671 /// Exception "Not enough Hazard Pointer"
672 typedef hp::not_enought_hazard_ptr not_enought_hazard_ptr_exception;
674 /// Internal statistics
675 typedef hp::stat stat;
677 /// Hazard Pointer guard
679 A guard is a hazard pointer.
680 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
682 \p %Guard object is movable but not copyable.
684 The guard object can be in two states:
685 - unlinked - the guard is not linked with any internal hazard pointer.
686 In this state no operation except \p link() and move assignment is supported.
687 - linked (default) - the guard allocates an internal hazard pointer and completely operable.
689 Due to performance reason the implementation does not check state of the guard in runtime.
691 @warning Move assignment transfers the guard in unlinked state, use with care.
696 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
698 @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
701 : guard_( hp::smr::tls()->hazards_.alloc() )
704 /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
705 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
709 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
710 Guard( Guard&& src ) CDS_NOEXCEPT
711 : guard_( src.guard_ )
713 src.guard_ = nullptr;
716 /// Move assignment: the internal guards are swapped between \p src and \p this
718 @warning \p src will become in unlinked state if \p this was unlinked on entry.
720 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
722 std::swap( guard_, src.guard_ );
726 /// Copy ctor is prohibited - the guard is not copyable
727 Guard( Guard const& ) = delete;
729 /// Copy assignment is prohibited
730 Guard& operator=( Guard const& ) = delete;
732 /// Frees the internal hazard pointer if the guard is in linked state
738 /// Checks if the guard object linked with any internal hazard pointer
739 bool is_linked() const
741 return guard_ != nullptr;
744 /// Links the guard with internal hazard pointer if the guard is in unlinked state
746 @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer array is exhausted.
751 guard_ = hp::smr::tls()->hazards_.alloc();
754 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
758 hp::smr::tls()->hazards_.free( guard_ );
763 /// Protects a pointer of type \p atomic<T*>
765 Return the value of \p toGuard
767 The function tries to load \p toGuard and to store it
768 to the HP slot repeatedly until the guard's value equals \p toGuard
770 @warning The guard object should be in linked state, otherwise the result is undefined
772 template <typename T>
773 T protect( atomics::atomic<T> const& toGuard )
775 assert( guard_ != nullptr );
777 T pCur = toGuard.load(atomics::memory_order_acquire);
780 pRet = assign( pCur );
781 pCur = toGuard.load(atomics::memory_order_acquire);
782 } while ( pRet != pCur );
786 /// Protects a converted pointer of type \p atomic<T*>
788 Return the value of \p toGuard
790 The function tries to load \p toGuard and to store result of \p f functor
791 to the HP slot repeatedly until the guard's value equals \p toGuard.
793 The function is useful for intrusive containers when \p toGuard is a node pointer
794 that should be converted to a pointer to the value before protecting.
795 The parameter \p f of type Func is a functor that makes this conversion:
798 value_type * operator()( T * p );
801 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
803 @warning The guard object should be in linked state, otherwise the result is undefined
805 template <typename T, class Func>
806 T protect( atomics::atomic<T> const& toGuard, Func f )
808 assert( guard_ != nullptr );
810 T pCur = toGuard.load(atomics::memory_order_acquire);
815 pCur = toGuard.load(atomics::memory_order_acquire);
816 } while ( pRet != pCur );
820 /// Store \p p to the guard
822 The function equals to a simple assignment the value \p p to guard, no loop is performed.
823 Can be used for a pointer that cannot be changed concurrently or if the pointer is already
824 guarded by another guard.
826 @warning The guard object should be in linked state, otherwise the result is undefined
828 template <typename T>
831 assert( guard_ != nullptr );
834 hp::smr::tls()->sync();
839 std::nullptr_t assign( std::nullptr_t )
841 assert( guard_ != nullptr );
848 /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
849 void copy( Guard const& src )
851 assign( src.get_native());
854 /// Store marked pointer \p p to the guard
856 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
857 Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer
858 is already guarded by another guard.
860 @warning The guard object should be in linked state, otherwise the result is undefined
862 template <typename T, int BITMASK>
863 T * assign( cds::details::marked_ptr<T, BITMASK> p )
865 return assign( p.ptr());
868 /// Clear value of the guard (valid only in linked state)
874 /// Get the value currently protected (valid only in linked state)
875 template <typename T>
878 assert( guard_ != nullptr );
879 return guard_->get_as<T>();
882 /// Get native hazard pointer stored (valid only in linked state)
883 guarded_pointer get_native() const
885 assert( guard_ != nullptr );
886 return guard_->get();
892 hp::guard* g = guard_;
897 hp::guard*& guard_ref()
909 /// Array of Hazard Pointer guards
911 The class is intended for allocating an array of hazard pointer guards.
912 Template parameter \p Count defines the size of the array.
914 template <size_t Count>
918 /// Rebind array for other size \p Count2
919 template <size_t Count2>
921 typedef GuardArray<Count2> other; ///< rebinding result
925 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
928 /// Default ctor allocates \p Count hazard pointers
931 hp::smr::tls()->hazards_.alloc( guards_ );
934 /// Move ctor is prohibited
935 GuardArray( GuardArray&& ) = delete;
937 /// Move assignment is prohibited
938 GuardArray& operator=( GuardArray&& ) = delete;
940 /// Copy ctor is prohibited
941 GuardArray( GuardArray const& ) = delete;
943 /// Copy assignment is prohibited
944 GuardArray& operator=( GuardArray const& ) = delete;
946 /// Frees allocated hazard pointers
949 hp::smr::tls()->hazards_.free( guards_ );
952 /// Protects a pointer of type \p atomic<T*>
954 Return the value of \p toGuard
956 The function tries to load \p toGuard and to store it
957 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
959 template <typename T>
960 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
962 assert( nIndex < capacity());
966 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
967 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
972 /// Protects a pointer of type \p atomic<T*>
974 Return the value of \p toGuard
976 The function tries to load \p toGuard and to store it
977 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
979 The function is useful for intrusive containers when \p toGuard is a node pointer
980 that should be converted to a pointer to the value type before guarding.
981 The parameter \p f of type Func is a functor that makes this conversion:
984 value_type * operator()( T * p );
987 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
989 template <typename T, class Func>
990 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
992 assert( nIndex < capacity());
996 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
997 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
1002 /// Store \p to the slot \p nIndex
1004 The function equals to a simple assignment, no loop is performed.
1006 template <typename T>
1007 T * assign( size_t nIndex, T * p )
1009 assert( nIndex < capacity() );
1011 guards_.set( nIndex, p );
1012 hp::smr::tls()->sync();
1016 /// Store marked pointer \p p to the guard
1018 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
1019 Can be used for a marked pointer that cannot be changed concurrently.
1021 template <typename T, int BITMASK>
1022 T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
1024 return assign( nIndex, p.ptr());
1027 /// Copy guarded value from \p src guard to slot at index \p nIndex
1028 void copy( size_t nIndex, Guard const& src )
1030 assign( nIndex, src.get_native());
1033 /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex
1034 void copy( size_t nDestIndex, size_t nSrcIndex )
1036 assign( nDestIndex, get_native( nSrcIndex ));
1039 /// Clear value of the slot \p nIndex
1040 void clear( size_t nIndex )
1042 guards_.clear( nIndex );
1045 /// Get current value of slot \p nIndex
1046 template <typename T>
1047 T * get( size_t nIndex ) const
1049 assert( nIndex < capacity() );
1050 return guards_[nIndex]->template get_as<T>();
1053 /// Get native hazard pointer stored
1054 guarded_pointer get_native( size_t nIndex ) const
1056 assert( nIndex < capacity());
1057 return guards_[nIndex]->get();
1061 hp::guard* release( size_t nIndex ) CDS_NOEXCEPT
1063 return guards_.release( nIndex );
1067 /// Capacity of the guard array
1068 static CDS_CONSTEXPR size_t capacity()
1075 hp::guard_array<c_nCapacity> guards_;
1081 A guarded pointer is a pair of a pointer and GC's guard.
1082 Usually, it is used for returning a pointer to an element of a lock-free container.
1083 The guard prevents the pointer to be early disposed (freed) by SMR.
1084 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1087 - \p GuardedType - a type which the guard stores
1088 - \p ValueType - a value type
1089 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1091 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1092 In such case the \p %guarded_ptr is:
1094 typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr;
1097 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1105 struct value_accessor {
1106 std::string* operator()( foo* pFoo ) const
1108 return &(pFoo->value);
1113 typedef cds::gc::HP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1116 You don't need to use this class directly.
1117 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1119 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1123 struct trivial_cast {
1124 ValueType * operator()( GuardedType * p ) const
1130 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1134 typedef GuardedType guarded_type; ///< Guarded type
1135 typedef ValueType value_type; ///< Value type
1137 /// Functor for casting \p guarded_type to \p value_type
1138 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1141 /// Creates empty guarded pointer
1142 guarded_ptr() CDS_NOEXCEPT
1147 explicit guarded_ptr( hp::guard* g ) CDS_NOEXCEPT
1151 /// Initializes guarded pointer with \p p
1152 explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
1157 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1163 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1164 : guard_( gp.guard_ )
1166 gp.guard_ = nullptr;
1170 template <typename GT, typename VT, typename C>
1171 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1172 : guard_( gp.guard_ )
1174 gp.guard_ = nullptr;
1177 /// Ctor from \p Guard
1178 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1179 : guard_( g.release())
1182 /// The guarded pointer is not copy-constructible
1183 guarded_ptr( guarded_ptr const& gp ) = delete;
1185 /// Clears the guarded pointer
1187 \ref release() is called if guarded pointer is not \ref empty()
1189 ~guarded_ptr() CDS_NOEXCEPT
1194 /// Move-assignment operator
1195 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1197 std::swap( guard_, gp.guard_ );
1201 /// Move-assignment from \p Guard
1202 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1204 std::swap( guard_, g.guard_ref());
1208 /// The guarded pointer is not copy-assignable
1209 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1211 /// Returns a pointer to guarded value
1212 value_type * operator ->() const CDS_NOEXCEPT
1215 return value_cast()( guard_->get_as<guarded_type>());
1218 /// Returns a reference to guarded value
1219 value_type& operator *() CDS_NOEXCEPT
1222 return *value_cast()( guard_->get_as<guarded_type>());
1225 /// Returns const reference to guarded value
1226 value_type const& operator *() const CDS_NOEXCEPT
1229 return *value_cast()( guard_->get_as<guarded_type>());
1232 /// Checks if the guarded pointer is \p nullptr
1233 bool empty() const CDS_NOEXCEPT
1235 return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1238 /// \p bool operator returns <tt>!empty()</tt>
1239 explicit operator bool() const CDS_NOEXCEPT
1244 /// Clears guarded pointer
1246 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1247 Dereferencing the guarded pointer after \p release() is dangerous.
1249 void release() CDS_NOEXCEPT
1255 // For internal use only!!!
1256 void reset(guarded_type * p) CDS_NOEXCEPT
1269 guard_ = hp::smr::tls()->hazards_.alloc();
1275 hp::smr::tls()->hazards_.free( guard_ );
1289 enum class scan_type {
1290 classic = hp::classic, ///< classic scan as described in Michael's papers
1291 inplace = hp::inplace ///< inplace scan without allocation
1294 /// Initializes %HP singleton
1296 The constructor initializes Hazard Pointer SMR singleton with passed parameters.
1297 If the instance does not yet exist then the function creates the instance.
1298 Otherwise it does nothing.
1300 The Michael's %HP reclamation schema depends on three parameters:
1301 - \p nHazardPtrCount - hazard pointer count per thread. Usually it is a small number (up to 10) depending on
1302 the data structure algorithms. If \p nHazardPtrCount = 0, the defaul value 8 is used
1303 - \p nMaxThreadCount - max count of thread with using Hazard Pointer GC in your application. Default is 100.
1304 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
1305 <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
1308 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
1309 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
1310 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
1311 scan_type nScanType = scan_type::inplace ///< Scan type (see \p scan_type enum)
1317 nMaxRetiredPtrCount,
1318 static_cast<hp::scan_type>(nScanType)
1322 /// Terminates GC singleton
1324 The destructor destroys the %HP global object. After calling this function you may \b NOT
1325 use CDS data structures based on \p %cds::gc::HP.
1326 Usually, %HP object is destroyed at the end of your \p main().
// Tear down the HP SMR singleton.
// NOTE(review): the boolean argument's meaning is not visible here — confirm
// against hp::smr::destruct (presumably "detach all threads").
1330 hp::smr::destruct( true );
1333 /// Checks that required hazard pointer count \p nCountNeeded is less than or equal to max hazard pointer count
1335 If <tt> nCountNeeded > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
1337 static void check_available_guards( size_t nCountNeeded )
// Delegates the check (and the possible throw) to the SMR core.
1339 hp::smr::check_hazard_ptr_count( nCountNeeded );
1342 /// Set memory management functions
1344 @note This function may be called <b>BEFORE</b> creating an instance
1345 of Hazard Pointer SMR
1347 SMR object allocates some memory for thread-specific data and for
1348 creating SMR object.
1349 By default, the standard \p new and \p delete operators are used for this.
1351 static void set_memory_allocator(
1352 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1353 void( *free_func )( void * p ) ///< \p free() function
// Forwards both callbacks to the SMR core for all later allocations.
1356 hp::smr::set_memory_allocator( alloc_func, free_func );
1359 /// Returns max Hazard Pointer count
1360 static size_t max_hazard_count()
// Delegates to the HP SMR singleton (must already be constructed).
1362 return hp::smr::instance().get_hazard_ptr_count();
1365 /// Returns max count of threads
1366 static size_t max_thread_count()
// Delegates to the HP SMR singleton (must already be constructed).
1368 return hp::smr::instance().get_max_thread_count();
1371 /// Returns capacity of retired pointer array
1372 static size_t retired_array_capacity()
// Delegates to the HP SMR singleton (must already be constructed).
1374 return hp::smr::instance().get_max_retired_ptr_count();
1377 /// Retire pointer \p p with function \p func
1379 The function places pointer \p p to array of pointers ready for removing.
1380 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1381 \p func is a disposer: when \p p can be safely removed, \p func is called.
1383 template <typename T>
1384 static void retire( T * p, void( *func )( void * ))
1386 hp::thread_data* rec = hp::smr::tls();
1387 if ( !rec->retired_.push( hp::retired_ptr( p, func )))
1388 hp::smr::instance().scan( rec );
1391 /// Retire pointer \p p with functor of type \p Disposer
1393 The function places pointer \p p to array of pointers ready for removing.
1394 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1396 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1398 template <typename T>
1400 void operator()( T * p ) ; // disposing operator
1403 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed on \p Disposer type:
1404 - it should be a stateless functor
1405 - it should be default-constructible
1406 - the result of functor call with argument \p p should not depend on where the functor will be called.
1409 Operator \p delete functor:
1411 template <typename T>
1413 void operator ()( T * p ) {
1418 // How to call HP::retire method
1421 // ... use p in lock-free manner
1423 cds::gc::HP::retire<disposer>( p ) ; // place p to retired pointer array of HP GC
1426 Functor based on \p std::allocator :
1428 template <typename Alloc = std::allocator<int> >
1430 template <typename T>
1431 void operator()( T * p ) {
1432 typedef typename Alloc::template rebind<T>::other alloc_t;
1435 a.deallocate( p, 1 );
1440 template <class Disposer, typename T>
1441 static void retire( T * p )
// static_functor adapts the stateless Disposer to the plain function-pointer
// disposer interface of hp::retired_ptr.
1443 if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1447 /// Get current scan strategy
1448 static scan_type getScanType()
// Converts the internal hp::scan_type back to the public enum.
1450 return static_cast<scan_type>( hp::smr::instance().get_scan_type());
1453 /// Checks if Hazard Pointer GC is constructed and may be used
1454 static bool isUsed()
// Safe to call before construction: queries the SMR core directly.
1456 return hp::smr::isUsed();
1459 /// Forces SMR call for current thread
1461 Usually, this function should not be called directly.
// Runs a scan of the current thread's retired pointers, disposing those
// that are no longer protected by any hazard pointer.
1465 hp::smr::instance().scan( hp::smr::tls());
1468 /// Synonym for \p scan(): forces processing of retired pointers for the current thread
1469 static void force_dispose()
1474 /// Returns internal statistics
1476 The function clears \p st before gathering statistics.
1478 @note Internal statistics are available only if you compile
1479 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
1481 static void statistics( stat& st )
// Fills st from the live SMR singleton.
1483 hp::smr::instance().statistics( st );
1486 /// Returns post-mortem statistics
1488 Post-mortem statistics is gathered in the \p %HP object destructor
1489 and is accessible after destructing the global \p %HP object.
1491 @note Internal statistics are available only if you compile
1492 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
1500 // Initialize HP SMR
1503 // deal with HP-based data structured
1507 // HP object destroyed
1508 // Get total post-mortem statistics
1509 cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics();
1511 printf( "HP statistics:\n"
1512 " thread count = %llu\n"
1513 " guard allocated = %llu\n"
1514 " guard freed = %llu\n"
1515 " retired data count = %llu\n"
1516 " free data count = %llu\n"
1517 " scan() call count = %llu\n"
1518 " help_scan() call count = %llu\n",
1519 st.thread_rec_count,
1520 st.guard_allocated, st.guard_freed,
1521 st.retired_count, st.free_count,
1522 st.scan_count, st.help_scan_count
// Declaration only: the definition lives in the compiled libcds library.
1529 CDS_EXPORT_API static stat const& postmortem_statistics();
1532 }} // namespace cds::gc
1534 #endif // #ifndef CDSLIB_GC_HP_SMR_H