//@cond
struct guard_block: public cds::intrusive::FreeListImpl::node
{
- guard_block* next_; // next block in the thread list
+ atomics::atomic<guard_block*> next_block_; // next block in the thread list
guard_block()
- : next_( nullptr )
+ : next_block_( nullptr )
{}
guard* first()
//@cond
/// Per-thread hazard pointer storage
- class thread_hp_storage
+ class thread_hp_storage
{
friend class smr;
public:
thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
: free_head_( arr )
- , extended_list_( nullptr )
, array_( arr )
, initial_capacity_( nSize )
# ifdef CDS_ENABLE_HPSTAT
{
// Initialize guards
new( arr ) guard[nSize];
+ extended_list_.store( nullptr, atomics::memory_order_release );
}
thread_hp_storage() = delete;
cur->clear();
// free all extended blocks
- hp_allocator& alloc = hp_allocator::instance();
- for ( guard_block* p = extended_list_; p; ) {
- guard_block* next = p->next_;
- alloc.free( p );
+ hp_allocator& a = hp_allocator::instance();
+ for ( guard_block* p = extended_list_.load( atomics::memory_order_relaxed ); p; ) {
+ guard_block* next = p->next_block_.load( atomics::memory_order_relaxed );
+ a.free( p );
p = next;
}
- extended_list_ = nullptr;
+ extended_list_.store( nullptr, atomics::memory_order_release );
}
void init()
{
- assert( extended_list_ == nullptr );
+ assert( extended_list_.load(atomics::memory_order_relaxed) == nullptr );
guard* p = array_;
for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
assert( free_head_ == nullptr );
guard_block* block = hp_allocator::instance().alloc();
- block->next_ = extended_list_;
- extended_list_ = block;
+ block->next_block_.store( extended_list_.load( atomics::memory_order_relaxed ), atomics::memory_order_release );
+ extended_list_.store( block, atomics::memory_order_release );
free_head_ = block->first();
CDS_HPSTAT( ++extend_call_count_ );
}
private:
guard* free_head_; ///< Head of free guard list
- guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
+ atomics::atomic<guard_block*> extended_list_; ///< Head of extended guard blocks allocated for the thread
guard* const array_; ///< initial HP array
size_t const initial_capacity_; ///< Capacity of \p array_
# ifdef CDS_ENABLE_HPSTAT
{
assert( current_block_ != nullptr );
assert( current_block_->first() <= current_cell_ );
- assert( current_cell_ < current_block_->last() );
+ assert( current_cell_ < current_block_->last());
//assert( &p != current_cell_ );
*current_cell_ = p;
CDS_HPSTAT( ++retire_call_count_ );
- if ( ++current_cell_ == current_block_->last() ) {
+ if ( ++current_cell_ == current_block_->last()) {
// goto next block if exists
if ( current_block_->next_ ) {
current_block_ = current_block_->next_;
}
bool repush( retired_ptr* p ) CDS_NOEXCEPT
- {
+ {
bool ret = push( *p );
CDS_HPSTAT( --retire_call_count_ );
assert( ret );
{
assert( list_head_ != nullptr );
assert( current_block_ == list_tail_ );
- assert( current_cell_ == current_block_->last() );
+ assert( current_cell_ == current_block_->last());
retired_block* block = retired_allocator::instance().alloc();
assert( block->next_ == nullptr );
- list_tail_ = list_tail_->next_ = block;
+ current_block_ = list_tail_ = list_tail_->next_ = block;
current_cell_ = block->first();
++block_count_;
CDS_HPSTAT( ++extend_call_count_ );
free_count =
scan_count =
help_scan_count =
- thread_rec_count =
- hp_block_count =
- retired_block_count =
- hp_extend_count =
+ thread_rec_count =
+ hp_block_count =
+ retired_block_count =
+ hp_extend_count =
retired_extend_count = 0;
}
};
assert( instance_ != nullptr );
# else
if ( !instance_ )
- CDS_THROW_EXCEPTION( not_initialized() );
+ CDS_THROW_EXCEPTION( not_initialized());
# endif
return *instance_;
}
%DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
+ @note Internally, %DHP depends on a free-list implementation. There are
+ a DCAS-based free-list \p cds::intrusive::TaggedFreeList and a more complicated CAS-based free-list
+ \p cds::intrusive::FreeList. For the x86 architecture and GCC/clang, libcds selects the appropriate
+ free-list based on the \p -mcx16 compiler flag. You may manually disable DCAS support by specifying
+ \p -DCDS_DISABLE_128BIT_ATOMIC for a 64-bit build or \p -DCDS_DISABLE_64BIT_ATOMIC for a 32-bit build
+ on the compiler command line. All your projects and libcds MUST be compiled with the same flags -
+ either with DCAS support or without it.
+ For the MS VC++ compiler, DCAS is not supported.
+
See \ref cds_how_to_use "How to use" section for details how to apply SMR.
*/
class DHP
public:
/// Default ctor allocates a guard (hazard pointer) from thread-private storage
Guard() CDS_NOEXCEPT
- : guard_( dhp::smr::tls()->hazards_.alloc() )
+ : guard_( dhp::smr::tls()->hazards_.alloc())
{}
/// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
template <typename T>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
T pRet;
do {
template <typename T, class Func>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
T pRet;
do {
template <typename T>
T * assign( size_t nIndex, T * p )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
guards_.set( nIndex, p );
dhp::smr::tls()->sync();
template <typename T>
T * get( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->template get_as<T>();
}
/// Get native guarded pointer stored
guarded_pointer get_native( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->get();
}
value_type * operator ->() const CDS_NOEXCEPT
{
assert( !empty());
- return value_cast()( guard_->get_as<guarded_type>() );
+ return value_cast()( guard_->get_as<guarded_type>());
}
/// Returns a reference to guarded value
value_type& operator *() CDS_NOEXCEPT
{
assert( !empty());
- return *value_cast()( guard_->get_as<guarded_type>() );
+ return *value_cast()( guard_->get_as<guarded_type>());
}
/// Returns const reference to guarded value
\p func is a disposer: when \p p can be safely removed, \p func is called.
*/
template <typename T>
- static void retire( T * p, void (* func)(T *))
+ static void retire( T * p, void (* func)(void *))
{
dhp::thread_data* rec = dhp::smr::tls();
- if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
+ if ( !rec->retired_.push( dhp::retired_ptr( p, func )))
dhp::smr::instance().scan( rec );
}
\endcode
*/
template <class Disposer, typename T>
- static void retire( T * p )
+ static void retire( T* p )
{
if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
scan();
*/
static void scan()
{
- dhp::smr::instance().scan( dhp::smr::tls() );
+ dhp::smr::instance().scan( dhp::smr::tls());
}
/// Synonym for \p scan()