X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=cds%2Fgc%2Fdhp.h;h=5c4c45e170f20e6711a95373550e89a7528b780a;hb=330e267919eb4991e43ba7daec8ade0b5e0a577c;hp=c854b91aabf1b2aa1ab63315984a368c09aa8487;hpb=9c5aae5c490d06a8fbc367dd9878b8af38325ca1;p=libcds.git

diff --git a/cds/gc/dhp.h b/cds/gc/dhp.h
index c854b91a..5c4c45e1 100644
--- a/cds/gc/dhp.h
+++ b/cds/gc/dhp.h
@@ -1,16 +1,1375 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
 
-#ifndef __CDS_GC_DHP_H
-#define __CDS_GC_DHP_H
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
-#include
-#include
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_GC_DHP_SMR_H
+#define CDSLIB_GC_DHP_SMR_H
+
+#include <exception>
+#include <cds/gc/details/hp_common.h>
 #include <cds/details/lib.h>
+#include <cds/threading/model.h>
+#include <cds/intrusive/free_list_selector.h>
+#include <cds/details/throw_exception.h>
+#include <cds/details/static_functor.h>
+#include <cds/details/marked_ptr.h>
+#include <cds/user_setup/cache_line.h>
 
-//@cond
 namespace cds { namespace gc {
-    typedef PTB DHP;
+
+    /// Dynamic (adaptive) Hazard Pointer implementation details
+    namespace dhp {
+        using namespace cds::gc::hp::common;
+
+        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
+        class not_initialized: public std::runtime_error
+        {
+        public:
+            //@cond
+            not_initialized()
+                : std::runtime_error( "Global DHP SMR object is not initialized" )
+            {}
+            //@endcond
+        };
+
+        //@cond
+        struct guard_block: public cds::intrusive::FreeListImpl::node
+        {
+            guard_block*    next_;  // next block in the thread list
+
+            guard_block()
+                : next_( nullptr )
+            {}
+
+            guard* first()
+            {
+                return reinterpret_cast<guard*>( this + 1 );
+            }
+        };
+        //@endcond
+
+        //@cond
+        /// \p guard_block allocator (global object)
+        class hp_allocator
+        {
+            friend class smr;
+        public:
+            static hp_allocator& instance();
+
+            CDS_EXPORT_API guard_block* alloc();
+            void free( guard_block* block )
+            {
+                free_list_.put( block );
+            }
+
+        private:
+            hp_allocator()
+            {}
+            CDS_EXPORT_API ~hp_allocator();
+
+        private:
+            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
+        };
+        //@endcond
+
+        //@cond
+        /// Per-thread hazard pointer storage
+        class thread_hp_storage
+        {
+            friend class smr;
+        public:
+            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
+                : free_head_( arr )
+                , extended_list_( nullptr )
+                , array_( arr )
+                , initial_capacity_( nSize )
+            {
+                // Initialize guards
+                new( arr ) guard[nSize];
+            }
+
+            thread_hp_storage() = delete;
+            thread_hp_storage( thread_hp_storage const& ) = delete;
+            thread_hp_storage( thread_hp_storage&& ) = delete;
+
+            ~thread_hp_storage()
+            {
+                clear();
+            }
+
+            guard* alloc()
+            {
+                if ( cds_unlikely( free_head_ == nullptr )) {
+                    extend();
+                    assert( free_head_ != nullptr );
+                }
+
+                guard* g = free_head_;
+                free_head_ = g->next_;
+                return g;
+            }
+
+            void free( guard* g ) CDS_NOEXCEPT
+            {
+                if ( g ) {
+                    g->clear();
+                    g->next_ = free_head_;
+                    free_head_ = g;
+                }
+            }
+
+            template <size_t Capacity>
+            size_t alloc( guard_array<Capacity>& arr )
+            {
+                for ( size_t i = 0; i < Capacity; ++i ) {
+                    if ( cds_unlikely( free_head_ == nullptr ))
+                        extend();
+                    arr.reset( i, free_head_ );
+                    free_head_ = free_head_->next_;
+                }
+                return Capacity;
+            }
+
+            template <size_t Capacity>
+            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
+            {
+                guard* gList = free_head_;
+                for ( size_t i = 0; i < Capacity; ++i ) {
+                    guard* g = arr[i];
+                    if ( g ) {
+                        g->clear();
+                        g->next_ = gList;
+                        gList = g;
+                    }
+                }
+                free_head_ = gList;
+            }
+
+            void clear()
+            {
+                // clear array_
+                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
+                    cur->clear();
+
+                // free all extended blocks
+                hp_allocator& alloc = hp_allocator::instance();
+                for ( guard_block* p = extended_list_; p; ) {
+                    guard_block* next = p->next_;
+                    alloc.free( p );
+                    p = next;
+                }
+
+                extended_list_ = nullptr;
+            }
+
+            void init()
+            {
+                assert( extended_list_ == nullptr );
+
+                guard* p = array_;
+                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
+                    p->next_ = p + 1;
+                p->next_ = nullptr;
+                free_head_ = array_;
+            }
+
+        private:
+            void extend()
+            {
+                assert( free_head_ == nullptr );
+
+                guard_block* block = hp_allocator::instance().alloc();
+                block->next_ = extended_list_;
+                extended_list_ = block;
+                free_head_ = block->first();
+            }
+
+        private:
+            guard*          free_head_;         ///< Head of free guard list
+            guard_block*    extended_list_;     ///< Head of extended guard blocks allocated for the thread
+            guard* const    array_;             ///< initial HP array
+            size_t const    initial_capacity_;  ///< Capacity of \p array_
+        };
+        //@endcond
+
+        //@cond
+        struct retired_block: public cds::intrusive::FreeListImpl::node
+        {
+            retired_block*  next_;  ///< Next block in thread-private retired array
+
+            static size_t const c_capacity = 256;
+
+            retired_block()
+                : next_( nullptr )
+            {}
+
+            retired_ptr* first() const
+            {
+                return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
+            }
+
+            retired_ptr* last() const
+            {
+                return first() + c_capacity;
+            }
+        };
+        //@endcond
+
+        //@cond
+        class retired_allocator
+        {
+            friend class smr;
+        public:
+            static retired_allocator& instance();
+
+            CDS_EXPORT_API retired_block* alloc();
+            void free( retired_block* block )
+            {
+                block->next_ = nullptr;
+                free_list_.put( block );
+            }
+
+        private:
+            retired_allocator()
+            {}
+            CDS_EXPORT_API ~retired_allocator();
+
+        private:
+            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
+        };
+        //@endcond
+
+        //@cond
+        /// Per-thread retired array
+        class retired_array
+        {
+            friend class smr;
+        public:
+            retired_array() CDS_NOEXCEPT
+                : current_block_( nullptr )
+                , current_cell_( nullptr )
+                , list_head_( nullptr )
+                , list_tail_( nullptr )
+                , block_count_(0)
+            {}
+
+            retired_array( retired_array const& ) = delete;
+            retired_array( retired_array&& ) = delete;
+
+            ~retired_array()
+            {
+                assert( empty());
+                fini();
+            }
+
+            bool push( retired_ptr const& p ) CDS_NOEXCEPT
+            {
+                assert( current_block_ != nullptr );
+                assert( current_block_->first() <= current_cell_ );
+                assert( current_cell_ < current_block_->last());
+                //assert( &p != current_cell_ );
+
+                *current_cell_ = p;
+                if ( ++current_cell_ == current_block_->last()) {
+                    // go to the next block if it exists
+                    if ( current_block_->next_ ) {
+                        current_block_ = current_block_->next_;
+                        current_cell_ = current_block_->first();
+                        return true;
+                    }
+
+                    // no free block:
+                    // smr::scan() extends the retired array if needed
+                    return false;
+                }
+
+                return true;
+            }
+
+            bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
+            {
+                bool ret = push( *p );
+                assert( ret );
+                return ret;
+            }
+
+        private: // called by smr
+            void init()
+            {
+                if ( list_head_ == nullptr ) {
+                    retired_block* block = retired_allocator::instance().alloc();
+                    assert( block->next_ == nullptr );
+
+                    current_block_ =
+                        list_head_ =
+                        list_tail_ = block;
+                    current_cell_ = block->first();
+
+                    block_count_ = 1;
+                }
+            }
+
+            void fini()
+            {
+                retired_allocator& alloc = retired_allocator::instance();
+                for ( retired_block* p = list_head_; p; ) {
+                    retired_block* next = p->next_;
+                    alloc.free( p );
+                    p = next;
+                }
+
+                current_block_ =
+                    list_head_ =
+                    list_tail_ = nullptr;
+                current_cell_ = nullptr;
+
+                block_count_ = 0;
+            }
+
+            void extend()
+            {
+                assert( list_head_ != nullptr );
+                assert( current_block_ == list_tail_ );
+                assert( current_cell_ == current_block_->last());
+
+                retired_block* block = retired_allocator::instance().alloc();
+                assert( block->next_ == nullptr );
+
+                list_tail_ = list_tail_->next_ = block;
+                current_cell_ = block->first();
+                ++block_count_;
+            }
+
+            bool empty() const
+            {
+                return current_block_ == nullptr
+                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
+            }
+
+        private:
+            retired_block*  current_block_;
+            retired_ptr*    current_cell_;  // in current_block_
+
+            retired_block*  list_head_;
+            retired_block*  list_tail_;
+            size_t          block_count_;
+        };
+        //@endcond
+
+        //@cond
+        /// Per-thread data
+        struct thread_data {
+            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
+            retired_array       retired_;   ///< Retired data private to the thread
+
+            char pad1_[cds::c_nCacheLineSize];
+            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
+            char pad2_[cds::c_nCacheLineSize];
+
+            // CppCheck warning: pad1_ and pad2_ are uninitialized in ctor
+            // cppcheck-suppress uninitMemberVar
+            thread_data( guard* guards, size_t guard_count )
+                : hazards_( guards, guard_count )
+                , sync_( 0 )
+            {}
+
+            thread_data() = delete;
+            thread_data( thread_data const& ) = delete;
+            thread_data( thread_data&& ) = delete;
+
+            void sync()
+            {
+                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
+            }
+        };
+        //@endcond
+
+        //@cond
+        // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
+        class smr
+        {
+            struct thread_record;
+
+        public:
+            /// Returns the instance of Hazard Pointer \ref smr
+            static smr& instance()
+            {
+# ifdef CDS_DISABLE_SMR_EXCEPTION
+                assert( instance_ != nullptr );
+# else
+                if ( !instance_ )
+                    CDS_THROW_EXCEPTION( not_initialized());
+# endif
+                return *instance_;
+            }
+
+            /// Creates Dynamic Hazard Pointer SMR singleton
+            /**
+                Dynamic Hazard Pointer SMR is a singleton. If the DHP instance is not yet initialized,
+                the function creates it; otherwise it does nothing.
+
+                The only parameter, \p nInitialHazardPtrCount, is the initial number of hazard pointers
+                allocated for each thread. Unlike classic Hazard Pointer SMR, the guard pool is adaptive:
+                when a thread exhausts its local pool, the pool grows automatically by allocating
+                guard blocks from a common (global) pool.
+            */
+            static CDS_EXPORT_API void construct(
+                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
+            );
+
+            // for back-compatibility
+            static void Construct(
+                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
+            )
+            {
+                construct( nInitialHazardPtrCount );
+            }
+
+            /// Destroys global instance of \ref smr
+            /**
+                The parameter \p bDetachAll should be used carefully: if its value is \p true,
+                then the destroyed object automatically detaches all attached threads. This feature
+                can be useful when you have no control over thread termination, for example,
+                when \p libcds is injected into an existing external thread.
+            */
+            static CDS_EXPORT_API void destruct(
+                bool bDetachAll = false     ///< Detach all threads
+            );
+
+            // for back-compatibility
+            static void Destruct(
+                bool bDetachAll = false     ///< Detach all threads
+            )
+            {
+                destruct( bDetachAll );
+            }
+
+            /// Checks if global SMR object is constructed and may be used
+            static bool isUsed() CDS_NOEXCEPT
+            {
+                return instance_ != nullptr;
+            }
+
+            /// Set memory management functions
+            /**
+                @note This function may be called BEFORE creating an instance
+                of Dynamic Hazard Pointer SMR
+
+                The SMR object allocates some memory for thread-specific data and for
+                creating the SMR object itself.
+                By default, the standard \p new and \p delete operators are used for this.
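+
+                A minimal usage sketch (illustrative only; \p my_alloc and \p my_free
+                are application-defined functions, not part of the library):
+                \code
+                void* my_alloc( size_t size ) { return ::malloc( size ); }
+                void  my_free( void* p )      { ::free( p ); }
+
+                // must be installed before dhp::smr::construct() is called
+                cds::gc::dhp::smr::set_memory_allocator( my_alloc, my_free );
+                cds::gc::dhp::smr::construct();
+                \endcode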
+            */
+            static CDS_EXPORT_API void set_memory_allocator(
+                void* ( *alloc_func )( size_t size ),
+                void( *free_func )( void * p )
+            );
+
+            /// Returns thread-local data for the current thread
+            static CDS_EXPORT_API thread_data* tls();
+
+            static CDS_EXPORT_API void attach_thread();
+            static CDS_EXPORT_API void detach_thread();
+
+        public: // for internal use only
+            /// The main garbage collecting function
+            CDS_EXPORT_API void scan( thread_data* pRec );
+
+            /// Helper scan routine
+            /**
+                The function guarantees that every node that is eligible for reuse is eventually freed, barring
+                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
+                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
+                to the thread's list of reclaimed pointers.
+
+                The function is called internally by \p scan().
+            */
+            CDS_EXPORT_API void help_scan( thread_data* pThis );
+
+            hp_allocator& get_hp_allocator()
+            {
+                return hp_allocator_;
+            }
+
+            retired_allocator& get_retired_allocator()
+            {
+                return retired_allocator_;
+            }
+
+        private:
+            CDS_EXPORT_API explicit smr(
+                size_t nInitialHazardPtrCount
+            );
+
+            CDS_EXPORT_API ~smr();
+
+            CDS_EXPORT_API void detach_all_thread();
+
+        private:
+            CDS_EXPORT_API thread_record* create_thread_data();
+            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
+
+            /// Allocates Hazard Pointer SMR thread private data
+            CDS_EXPORT_API thread_record* alloc_thread_data();
+
+            /// Frees HP SMR thread-private data
+            CDS_EXPORT_API void free_thread_data( thread_record* pRec );
+
+        private:
+            static CDS_EXPORT_API smr* instance_;
+
+            atomics::atomic<thread_record*> thread_list_;   ///< Head of thread list
+            size_t const        initial_hazard_count_;      ///< initial number of hazard pointers per thread
+            hp_allocator        hp_allocator_;
+            retired_allocator   retired_allocator_;
+
+            // temporaries
+            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
+        };
+        //@endcond
+
+        //@cond
+        // for backward compatibility
+        typedef smr GarbageCollector;
+
+
+        // inlines
+        inline hp_allocator& hp_allocator::instance()
+        {
+            return smr::instance().get_hp_allocator();
+        }
+
+        inline retired_allocator& retired_allocator::instance()
+        {
+            return smr::instance().get_retired_allocator();
+        }
+        //@endcond
+
+    } // namespace dhp
+
+
+    /// Dynamic (adaptive) Hazard Pointer SMR
+    /** @ingroup cds_garbage_collector
+
+        Implementation of Dynamic (adaptive) Hazard Pointer SMR
+
+        Sources:
+        - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
+        - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
+        - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
+
+        %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
+
+        See \ref cds_how_to_use "How to use" section for details how to apply SMR.
+    */
+    class DHP
+    {
+    public:
+        /// Native guarded pointer type
+        typedef void* guarded_pointer;
+
+        /// Atomic reference
+        template <typename T> using atomic_ref = atomics::atomic<T *>;
+
+        /// Atomic type
+        /**
+            @headerfile cds/gc/dhp.h
+        */
+        template <typename T> using atomic_type = atomics::atomic<T>;
+
+        /// Atomic marked pointer
+        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+
+
+        /// Dynamic Hazard Pointer guard
+        /**
+            A guard is a hazard pointer.
+            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
+
+            A \p %Guard object is movable but not copyable.
+
+            The guard object can be in two states:
+            - unlinked - the guard is not linked with any internal hazard pointer.
+              In this state no operation except \p link() and move assignment is supported.
+            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.
+
+            For performance reasons the implementation does not check the state of the guard at runtime.
+
+            @warning Move assignment can transfer the guard to the unlinked state; use with care.
+        */
+        class Guard
+        {
+        public:
+            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
+            Guard() CDS_NOEXCEPT
+                : guard_( dhp::smr::tls()->hazards_.alloc())
+            {}
+
+            /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
+            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+
+            /// Move ctor - \p src guard becomes unlinked (transfers internal guard ownership)
+            Guard( Guard&& src ) CDS_NOEXCEPT
+                : guard_( src.guard_ )
+            {
+                src.guard_ = nullptr;
+            }
+
+            /// Move assignment: the internal guards are swapped between \p src and \p this
+            /**
+                @warning \p src will become unlinked if \p this was unlinked on entry.
+            */
+            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, src.guard_ );
+                return *this;
+            }
+
+            /// Copy ctor is prohibited - the guard is not copyable
+            Guard( Guard const& ) = delete;
+
+            /// Copy assignment is prohibited
+            Guard& operator=( Guard const& ) = delete;
+
+            /// Frees the internal hazard pointer if the guard is in linked state
+            ~Guard()
+            {
+                unlink();
+            }
+
+            /// Checks if the guard object is linked with an internal hazard pointer
+            bool is_linked() const
+            {
+                return guard_ != nullptr;
+            }
+
+            /// Links the guard with an internal hazard pointer if the guard is in unlinked state
+            void link()
+            {
+                if ( !guard_ )
+                    guard_ = dhp::smr::tls()->hazards_.alloc();
+            }
+
+            /// Unlinks the guard from its internal hazard pointer; the guard becomes unlinked
+            void unlink()
+            {
+                if ( guard_ ) {
+                    dhp::smr::tls()->hazards_.free( guard_ );
+                    guard_ = nullptr;
+                }
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard
+
+                The function tries to load \p toGuard and to store it
+                to the HP slot repeatedly until the guard's value equals \p toGuard
+            */
+            template <typename T>
+            T protect( atomics::atomic<T> const& toGuard )
+            {
+                assert( guard_ != nullptr );
+
+                T pCur = toGuard.load(atomics::memory_order_acquire);
+                T pRet;
+                do {
+                    pRet = assign( pCur );
+                    pCur = toGuard.load(atomics::memory_order_acquire);
+                } while ( pRet != pCur );
+                return pCur;
+            }
+
+            /// Protects a converted pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard
+
+                The function tries to load \p toGuard and to store the result of the \p f functor
+                to the HP slot repeatedly until the guard's value equals \p toGuard.
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value type before guarding.
+                The parameter \p f of type Func is a functor that makes this conversion:
+                \code
+                struct functor {
+                    value_type * operator()( T * p );
+                };
+                \endcode
+                In effect, the result of <tt>f( toGuard.load())</tt> is assigned to the hazard pointer.
+            */
+            template <typename T, class Func>
+            T protect( atomics::atomic<T> const& toGuard, Func f )
+            {
+                assert( guard_ != nullptr );
+
+                T pCur = toGuard.load(atomics::memory_order_acquire);
+                T pRet;
+                do {
+                    pRet = pCur;
+                    assign( f( pCur ));
+                    pCur = toGuard.load(atomics::memory_order_acquire);
+                } while ( pRet != pCur );
+                return pCur;
+            }
+
+            /// Stores \p p to the guard
+            /**
+                The function is just an assignment, no loop is performed.
+                Can be used for a pointer that cannot be changed concurrently
+                or for an already guarded pointer.
+            */
+            template <typename T>
+            T* assign( T* p )
+            {
+                assert( guard_ != nullptr );
+
+                guard_->set( p );
+                dhp::smr::tls()->sync();
+                return p;
+            }
+
+            //@cond
+            std::nullptr_t assign( std::nullptr_t )
+            {
+                assert( guard_ != nullptr );
+
+                clear();
+                return nullptr;
+            }
+            //@endcond
+
+            /// Stores marked pointer \p p to the guard
+            /**
+                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
+                Can be used for a marked pointer that cannot be changed concurrently
+                or for an already guarded pointer.
+            */
+            template <typename T, int BITMASK>
+            T* assign( cds::details::marked_ptr<T, BITMASK> p )
+            {
+                return assign( p.ptr());
+            }
+
+            /// Copies from \p src guard to \p this guard
+            void copy( Guard const& src )
+            {
+                assign( src.get_native());
+            }
+
+            /// Clears value of the guard
+            void clear()
+            {
+                assert( guard_ != nullptr );
+
+                guard_->clear();
+            }
+
+            /// Gets the value currently protected (relaxed read)
+            template <typename T>
+            T * get() const
+            {
+                assert( guard_ != nullptr );
+                return guard_->get_as<T>();
+            }
+
+            /// Gets native guarded pointer stored
+            void* get_native() const
+            {
+                assert( guard_ != nullptr );
+                return guard_->get();
+            }
+
+            //@cond
+            dhp::guard* release()
+            {
+                dhp::guard* g = guard_;
+                guard_ = nullptr;
+                return g;
+            }
+
+            dhp::guard*& guard_ref()
+            {
+                return guard_;
+            }
+            //@endcond
+
+        private:
+            //@cond
+            dhp::guard* guard_;
+            //@endcond
+        };
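+
+        // A minimal usage sketch for \p Guard (illustrative only; item_type and
+        // head_ are assumed application-side names, not part of libcds):
+        // \code
+        // struct item_type { int value; };
+        // atomics::atomic<item_type*> head_;
+        //
+        // int read_head()
+        // {
+        //     cds::gc::DHP::Guard g;
+        //     // protect() loops until the hazard pointer equals head_;
+        //     // while g lives, *p cannot be reclaimed by DHP
+        //     item_type* p = g.protect( head_ );
+        //     return p ? p->value : -1;
+        // }
+        // \endcode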
+
+        /// Array of Dynamic Hazard Pointer guards
+        /**
+            The class is intended for allocating an array of hazard pointer guards.
+            Template parameter \p Count defines the size of the array.
+
+            A \p %GuardArray object is not copy- and move-constructible
+            and not copy- and move-assignable.
+        */
+        template <size_t Count>
+        class GuardArray
+        {
+        public:
+            /// Rebind array for another size \p OtherCount
+            template <size_t OtherCount>
+            struct rebind {
+                typedef GuardArray<OtherCount> other;   ///< rebinding result
+            };
+
+            /// Array capacity
+            static CDS_CONSTEXPR const size_t c_nCapacity = Count;
+
+        public:
+            /// Default ctor allocates \p Count hazard pointers
+            GuardArray()
+            {
+                dhp::smr::tls()->hazards_.alloc( guards_ );
+            }
+
+            /// Move ctor is prohibited
+            GuardArray( GuardArray&& ) = delete;
+
+            /// Move assignment is prohibited
+            GuardArray& operator=( GuardArray&& ) = delete;
+
+            /// Copy ctor is prohibited
+            GuardArray( GuardArray const& ) = delete;
+
+            /// Copy assignment is prohibited
+            GuardArray& operator=( GuardArray const& ) = delete;
+
+            /// Frees allocated hazard pointers
+            ~GuardArray()
+            {
+                dhp::smr::tls()->hazards_.free( guards_ );
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard
+
+                The function tries to load \p toGuard and to store it
+                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
+            */
+            template <typename T>
+            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
+            {
+                assert( nIndex < capacity());
+
+                T pRet;
+                do {
+                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
+                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+
+                return pRet;
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard
+
+                The function tries to load \p toGuard and to store it
+                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value type before guarding.
+                The parameter \p f of type Func is a functor that makes this conversion:
+                \code
+                struct functor {
+                    value_type * operator()( T * p );
+                };
+                \endcode
+                In effect, the result of <tt>f( toGuard.load())</tt> is assigned to the hazard pointer.
+            */
+            template <typename T, class Func>
+            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
+            {
+                assert( nIndex < capacity());
+
+                T pRet;
+                do {
+                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
+                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+
+                return pRet;
+            }
+
+            /// Stores \p p to the slot \p nIndex
+            /**
+                The function is just an assignment, no loop is performed.
+            */
+            template <typename T>
+            T * assign( size_t nIndex, T * p )
+            {
+                assert( nIndex < capacity());
+
+                guards_.set( nIndex, p );
+                dhp::smr::tls()->sync();
+                return p;
+            }
+
+            /// Stores marked pointer \p p to the guard
+            /**
+                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
+                Can be used for a marked pointer that cannot be changed concurrently
+                or for an already guarded pointer.
+            */
+            template <typename T, int BITMASK>
+            T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
+            {
+                return assign( nIndex, p.ptr());
+            }
+
+            /// Copies guarded value from \p src guard to slot at index \p nIndex
+            void copy( size_t nIndex, Guard const& src )
+            {
+                assign( nIndex, src.get_native());
+            }
+
+            /// Copies guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
+            void copy( size_t nDestIndex, size_t nSrcIndex )
+            {
+                assign( nDestIndex, get_native( nSrcIndex ));
+            }
+
+            /// Clears value of the slot \p nIndex
+            void clear( size_t nIndex )
+            {
+                guards_.clear( nIndex );
+            }
+
+            /// Gets current value of slot \p nIndex
+            template <typename T>
+            T * get( size_t nIndex ) const
+            {
+                assert( nIndex < capacity());
+                return guards_[nIndex]->template get_as<T>();
+            }
+
+            /// Gets native guarded pointer stored
+            guarded_pointer get_native( size_t nIndex ) const
+            {
+                assert( nIndex < capacity());
+                return guards_[nIndex]->get();
+            }
+
+            //@cond
+            dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
+            {
+                return guards_.release( nIndex );
+            }
+            //@endcond
+
+            /// Capacity of the guard array
+            static CDS_CONSTEXPR size_t capacity()
+            {
+                return Count;
+            }
+
+        private:
+            //@cond
+            dhp::guard_array<c_nCapacity> guards_;
+            //@endcond
+        };
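+
+        // A minimal usage sketch for \p GuardArray (illustrative only; node_type
+        // with an atomic next_ field and head_ are assumed application-side names):
+        // \code
+        // struct node_type {
+        //     atomics::atomic<node_type*> next_;
+        //     int key;
+        // };
+        // atomics::atomic<node_type*> head_;
+        //
+        // // guard two adjacent nodes at once while traversing a list
+        // cds::gc::DHP::GuardArray<2> guards;
+        // node_type* prev = guards.protect( 0, head_ );
+        // node_type* cur  = prev ? guards.protect( 1, prev->next_ ) : nullptr;
+        // \endcode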
+
+        /// Guarded pointer
+        /**
+            A guarded pointer is a pair of a pointer and a GC guard.
+            Usually, it is used for returning a pointer to an item of a lock-free container.
+            The guard prevents the pointer from being disposed (freed) too early by the GC.
+            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.
+
+            Template arguments:
+            - \p GuardedType - a type which the guard stores
+            - \p ValueType - a value type
+            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
+
+            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
+            In such case the \p %guarded_ptr is:
+            @code
+            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
+            @endcode
+
+            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
+            For example:
+            @code
+            struct foo {
+                int const   key;
+                std::string value;
+            };
+
+            struct value_accessor {
+                std::string* operator()( foo* pFoo ) const
+                {
+                    return &(pFoo->value);
+                }
+            };
+
+            // Guarded ptr
+            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
+            @endcode
+
+            You don't need to use this class directly.
+            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with an appropriate casting functor.
+        */
+        template <typename GuardedType, typename ValueType = GuardedType, typename Cast = void>
+        class guarded_ptr
+        {
+            //@cond
+            struct trivial_cast {
+                ValueType * operator()( GuardedType * p ) const
+                {
+                    return p;
+                }
+            };
+
+            template <typename GT, typename VT, typename C> friend class guarded_ptr;
+            //@endcond
+
+        public:
+            typedef GuardedType guarded_type;   ///< Guarded type
+            typedef ValueType   value_type;     ///< Value type
+
+            /// Functor for casting \p guarded_type to \p value_type
+            typedef typename std::conditional< std::is_same<void, Cast>::value, trivial_cast, Cast >::type value_cast;
+
+        public:
+            /// Creates empty guarded pointer
+            guarded_ptr() CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+
+            //@cond
+            explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
+                : guard_( g )
+            {}
+
+            /// Initializes guarded pointer with \p p
+            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {
+                reset( p );
+            }
+            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+            //@endcond
+
+            /// Move ctor
+            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
+                : guard_( gp.guard_ )
+            {
+                gp.guard_ = nullptr;
+            }
+
+            /// Move ctor
+            template <typename GT, typename VT, typename C>
+            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
+                : guard_( gp.guard_ )
+            {
+                gp.guard_ = nullptr;
+            }
+
+            /// Ctor from \p Guard
+            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
+                : guard_( g.release())
+            {}
+
+            /// The guarded pointer is not copy-constructible
+            guarded_ptr( guarded_ptr const& gp ) = delete;
+
+            /// Clears the guarded pointer
+            /**
+                \ref release() is called if the guarded pointer is not \ref empty()
+            */
+            ~guarded_ptr() CDS_NOEXCEPT
+            {
+                release();
+            }
+
+            /// Move-assignment operator
+            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, gp.guard_ );
+                return *this;
+            }
+
+            /// Move-assignment from \p Guard
+            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, g.guard_ref());
+                return *this;
+            }
+
+            /// The guarded pointer is not copy-assignable
+            guarded_ptr& operator=( guarded_ptr const& gp ) = delete;
+
+            /// Returns a pointer to the guarded value
+            value_type * operator ->() const CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return value_cast()( guard_->get_as<guarded_type>());
+            }
+
+            /// Returns a reference to the guarded value
+            value_type& operator *() CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return *value_cast()( guard_->get_as<guarded_type>());
+            }
+
+            /// Returns const reference to the guarded value
+            value_type const& operator *() const CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return *value_cast()( reinterpret_cast<guarded_type *>( guard_->get()));
+            }
+
+            /// Checks if the guarded pointer is \p nullptr
+            bool empty() const CDS_NOEXCEPT
+            {
+                return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
+            }
+
+            /// \p bool operator returns <tt>!empty()</tt>
+            explicit operator bool() const CDS_NOEXCEPT
+            {
+                return !empty();
+            }
+
+            /// Clears guarded pointer
+            /**
+                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
+                Dereferencing the guarded pointer after \p release() is dangerous.
+            */
+            void release() CDS_NOEXCEPT
+            {
+                free_guard();
+            }
+
+            //@cond
+            // For internal use only!!!
+            void reset( guarded_type * p ) CDS_NOEXCEPT
+            {
+                alloc_guard();
+                assert( guard_ );
+                guard_->set( p );
+            }
+            //@endcond
+
+        private:
+            //@cond
+            void alloc_guard()
+            {
+                if ( !guard_ )
+                    guard_ = dhp::smr::tls()->hazards_.alloc();
+            }
+
+            void free_guard()
+            {
+                if ( guard_ ) {
+                    dhp::smr::tls()->hazards_.free( guard_ );
+                    guard_ = nullptr;
+                }
+            }
+            //@endcond
+
+        private:
+            //@cond
+            dhp::guard* guard_;
+            //@endcond
+        };
+
+    public:
+        /// Initializes %DHP memory manager singleton
+        /**
+            The constructor creates and initializes the %DHP global object.
+            The %DHP object should be created before using any CDS data structure based on \p %cds::gc::DHP.
+            Usually, it is created in the beginning of \p main().
+            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.
+
+            \p nInitialHazardPtrCount is the initial number of guards allocated for each thread.
+            When a thread is initialized, the GC allocates a local guard pool for the thread from a common guard pool.
+            When needed, the thread's local guard pool grows automatically from the common pool.
+            When the thread terminates, its guard pool is returned to the GC's common pool.
+        */
+        explicit DHP(
+            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
+        )
+        {
+            dhp::smr::construct( nInitialHazardPtrCount );
+        }
+
+        /// Destroys %DHP memory manager
+        /**
+            The destructor destroys the %DHP global object. After calling this function you may \b NOT
+            use CDS data structures based on \p %cds::gc::DHP.
+            Usually, the %DHP object is destroyed at the end of your \p main().
+        */
+        ~DHP()
+        {
+            dhp::GarbageCollector::destruct( true );
+        }
+
+        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
+        /**
+            The function always returns \p true since the guard count is unlimited for
+            the \p %gc::DHP garbage collector.
+        */
+        static CDS_CONSTEXPR bool check_available_guards(
+#ifdef CDS_DOXYGEN_INVOKED
+            size_t nCountNeeded,
+#else
+            size_t
+#endif
+        )
+        {
+            return true;
+        }
+
+        /// Set memory management functions
+        /**
+            @note This function may be called BEFORE creating an instance
+            of Dynamic Hazard Pointer SMR
+
+            The SMR object allocates some memory for thread-specific data and for creating the SMR object itself.
+            By default, the standard \p new and \p delete operators are used for this.
+        */
+        static void set_memory_allocator(
+            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
+            void( *free_func )( void * p )          ///< \p free() function
+        )
+        {
+            dhp::smr::set_memory_allocator( alloc_func, free_func );
+        }
+
+        /// Retires pointer \p p with function \p func
+        /**
+            The function places the pointer \p p into the array of pointers ready for removing
+            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+            \p func is a disposer: when \p p can be safely removed, \p func is called.
+        */
+        template <typename T>
+        static void retire( T * p, void (* func)(T *))
+        {
+            dhp::thread_data* rec = dhp::smr::tls();
+            if ( !rec->retired_.push( dhp::retired_ptr( p, func )))
+                dhp::smr::instance().scan( rec );
+        }
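+
+        // A minimal usage sketch for \p retire() with a disposer function
+        // (illustrative only; foo and dispose_foo are assumed application-side names):
+        // \code
+        // struct foo { int value; };
+        //
+        // void dispose_foo( foo* p )
+        // {
+        //     delete p;   // called by DHP when no hazard pointer protects p
+        // }
+        //
+        // foo* p = new foo;
+        // // ... p is unlinked from a lock-free container ...
+        // cds::gc::DHP::retire( p, dispose_foo );
+        // \endcode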
+
+        /// Retires pointer \p p with functor of type \p Disposer
+        /**
+            The function places the pointer \p p into the array of pointers ready for removing
+            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+
+            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
+            \code
+            template <typename T>
+            struct disposer {
+                void operator()( T * p );   // disposing operator
+            };
+            \endcode
+            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
+            - it should be a stateless functor
+            - it should be default-constructible
+            - the result of the functor call with argument \p p should not depend on where the functor is called.
+
+            \par Examples:
+            Operator \p delete functor:
+            \code
+            template <typename T>
+            struct disposer {
+                void operator ()( T * p ) {
+                    delete p;
+                }
+            };
+
+            // How to call the DHP::retire method
+            int * p = new int;
+
+            // ... use p in lock-free manner
+
+            cds::gc::DHP::retire< disposer<int> >( p ); // place p to the retired pointer array of DHP SMR
+            \endcode
+
+            Functor based on \p std::allocator:
+            \code
+            template <typename Alloc = std::allocator<int> >
+            struct disposer {
+                template <typename T>
+                void operator()( T * p ) {
+                    typedef typename Alloc::template rebind<T>::other alloc_t;
+                    alloc_t a;
+                    a.destroy( p );
+                    a.deallocate( p, 1 );
+                }
+            };
+            \endcode
+        */
+        template <class Disposer, typename T>
+        static void retire( T * p )
+        {
+            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
+                scan();
+        }
+
+        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
+        static bool isUsed()
+        {
+            return dhp::smr::isUsed();
+        }
+
+        /// Forced GC cycle call for the current thread
+        /**
+            Usually, this function should not be called directly.
+        */
+        static void scan()
+        {
+            dhp::smr::instance().scan( dhp::smr::tls());
+        }
+
+        /// Synonym for \p scan()
+        static void force_dispose()
+        {
+            scan();
+        }
+    };
+
 }} // namespace cds::gc
-//@endcond
 
-#endif // #ifndef __CDS_GC_DHP_H
+#endif // #ifndef CDSLIB_GC_DHP_SMR_H
+