//@cond
struct guard_block: public cds::intrusive::FreeListImpl::node
{
- guard_block* next_; // next block in the thread list
+ atomics::atomic<guard_block*> next_block_; // next block in the thread list
guard_block()
- : next_( nullptr )
+ : next_block_( nullptr )
{}
guard* first()
private:
hp_allocator()
+#ifdef CDS_ENABLE_HPSTAT
+ : block_allocated_(0)
+#endif
{}
CDS_EXPORT_API ~hp_allocator();
private:
cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
+#ifdef CDS_ENABLE_HPSTAT
+ public:
+ atomics::atomic<size_t> block_allocated_; ///< Count of allocated blocks
+#endif
};
//@endcond
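+ // Note: the CDS_HPSTAT( expr ) helper used below compiles statistics code in
+ // or out. Its assumed definition (it lives elsewhere in libcds) is roughly:
+ //    #ifdef CDS_ENABLE_HPSTAT
+ //    #   define CDS_HPSTAT( expr ) expr
+ //    #else
+ //    #   define CDS_HPSTAT( expr )
+ //    #endif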
//@cond
/// Per-thread hazard pointer storage
class thread_hp_storage
{
friend class smr;
public:
thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
: free_head_( arr )
- , extended_list_( nullptr )
, array_( arr )
, initial_capacity_( nSize )
+# ifdef CDS_ENABLE_HPSTAT
+ , alloc_guard_count_( 0 )
+ , free_guard_count_( 0 )
+ , extend_call_count_( 0 )
+# endif
{
// Initialize guards
new( arr ) guard[nSize];
+ extended_list_.store( nullptr, atomics::memory_order_release );
}
thread_hp_storage() = delete;
guard* g = free_head_;
free_head_ = g->next_;
+ CDS_HPSTAT( ++alloc_guard_count_ );
return g;
}
g->clear();
g->next_ = free_head_;
free_head_ = g;
+ CDS_HPSTAT( ++free_guard_count_ );
}
}
arr.reset( i, free_head_ );
free_head_ = free_head_->next_;
}
+ CDS_HPSTAT( alloc_guard_count_ += Capacity );
return Capacity;
}
g->clear();
g->next_ = gList;
gList = g;
+ CDS_HPSTAT( ++free_guard_count_ );
}
}
free_head_ = gList;
cur->clear();
// free all extended blocks
- hp_allocator& alloc = hp_allocator::instance();
- for ( guard_block* p = extended_list_; p; ) {
- guard_block* next = p->next_;
- alloc.free( p );
+ hp_allocator& a = hp_allocator::instance();
+ for ( guard_block* p = extended_list_.load( atomics::memory_order_relaxed ); p; ) {
+ guard_block* next = p->next_block_.load( atomics::memory_order_relaxed );
+ a.free( p );
p = next;
}
- extended_list_ = nullptr;
+ extended_list_.store( nullptr, atomics::memory_order_release );
}
void init()
{
- assert( extended_list_ == nullptr );
+ assert( extended_list_.load(atomics::memory_order_relaxed) == nullptr );
guard* p = array_;
for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
assert( free_head_ == nullptr );
guard_block* block = hp_allocator::instance().alloc();
- block->next_ = extended_list_;
- extended_list_ = block;
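+ // link the new block at the head of this thread's extended block list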
+ block->next_block_.store( extended_list_.load( atomics::memory_order_relaxed ), atomics::memory_order_release );
+ extended_list_.store( block, atomics::memory_order_release );
free_head_ = block->first();
+ CDS_HPSTAT( ++extend_call_count_ );
}
private:
guard* free_head_; ///< Head of free guard list
- guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
+ atomics::atomic<guard_block*> extended_list_; ///< Head of extended guard blocks allocated for the thread
guard* const array_; ///< initial HP array
size_t const initial_capacity_; ///< Capacity of \p array_
+# ifdef CDS_ENABLE_HPSTAT
+ public:
+ size_t alloc_guard_count_; ///< Count of guard allocations
+ size_t free_guard_count_; ///< Count of guard deallocations
+ size_t extend_call_count_; ///< Count of \p extend() calls
+# endif
};
//@endcond
private:
retired_allocator()
+#ifdef CDS_ENABLE_HPSTAT
+ : block_allocated_(0)
+#endif
{}
CDS_EXPORT_API ~retired_allocator();
private:
cds::intrusive::FreeListImpl free_list_; ///< list of free \p retired_block
+#ifdef CDS_ENABLE_HPSTAT
+ public:
+ atomics::atomic<size_t> block_allocated_; ///< Count of allocated blocks
+#endif
};
//@endcond
, list_head_( nullptr )
, list_tail_( nullptr )
, block_count_(0)
+# ifdef CDS_ENABLE_HPSTAT
+ , retire_call_count_( 0 )
+ , extend_call_count_( 0 )
+# endif
{}
retired_array( retired_array const& ) = delete;
{
assert( current_block_ != nullptr );
assert( current_block_->first() <= current_cell_ );
- assert( current_cell_ < current_block_->last() );
+ assert( current_cell_ < current_block_->last());
//assert( &p != current_cell_ );
*current_cell_ = p;
- if ( ++current_cell_ == current_block_->last() ) {
+ CDS_HPSTAT( ++retire_call_count_ );
+
+ if ( ++current_cell_ == current_block_->last()) {
// goto next block if exists
if ( current_block_->next_ ) {
current_block_ = current_block_->next_;
return true;
}
- bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
- {
+ bool repush( retired_ptr* p ) CDS_NOEXCEPT
+ {
bool ret = push( *p );
+ CDS_HPSTAT( --retire_call_count_ );
assert( ret );
return ret;
}
{
assert( list_head_ != nullptr );
assert( current_block_ == list_tail_ );
- assert( current_cell_ == current_block_->last() );
+ assert( current_cell_ == current_block_->last());
retired_block* block = retired_allocator::instance().alloc();
assert( block->next_ == nullptr );
- list_tail_ = list_tail_->next_ = block;
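+ // append the new block at the list tail and make it the current block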
+ current_block_ = list_tail_ = list_tail_->next_ = block;
current_cell_ = block->first();
++block_count_;
+ CDS_HPSTAT( ++extend_call_count_ );
}
bool empty() const
retired_block* list_head_;
retired_block* list_tail_;
size_t block_count_;
+# ifdef CDS_ENABLE_HPSTAT
+ public:
+ size_t retire_call_count_; ///< Count of retired pointers pushed
+ size_t extend_call_count_; ///< Count of \p extend() calls
+# endif
};
//@endcond
+ /// Internal statistics
+ struct stat {
+ size_t guard_allocated; ///< Count of allocated HP guards
+ size_t guard_freed; ///< Count of freed HP guards
+ size_t retired_count; ///< Count of retired pointers
+ size_t free_count; ///< Count of freed pointers
+ size_t scan_count; ///< Count of \p scan() calls
+ size_t help_scan_count; ///< Count of \p help_scan() calls
+
+ size_t thread_rec_count; ///< Count of thread records
+
+ size_t hp_block_count; ///< Count of extended HP blocks allocated
+ size_t retired_block_count; ///< Count of retired blocks allocated
+ size_t hp_extend_count; ///< Count of HP array \p extend() calls
+ size_t retired_extend_count; ///< Count of retired array \p extend() calls
+
+ /// Default ctor
+ stat()
+ {
+ clear();
+ }
+
+ /// Clears all counters
+ void clear()
+ {
+ guard_allocated =
+ guard_freed =
+ retired_count =
+ free_count =
+ scan_count =
+ help_scan_count =
+ thread_rec_count =
+ hp_block_count =
+ retired_block_count =
+ hp_extend_count =
+ retired_extend_count = 0;
+ }
+ };
+
//@cond
/// Per-thread data
struct thread_data {
atomics::atomic<unsigned int> sync_; ///< dummy var to introduce a synchronizes-with relationship between threads
char pad2_[cds::c_nCacheLineSize];
+# ifdef CDS_ENABLE_HPSTAT
+ size_t free_call_count_; ///< Count of retired pointers freed
+ size_t scan_call_count_; ///< Count of \p scan() calls
+ size_t help_scan_call_count_; ///< Count of \p help_scan() calls
+# endif
+
// CppCheck warning: pad1_ and pad2_ are uninitialized in ctor
// cppcheck-suppress uninitMemberVar
thread_data( guard* guards, size_t guard_count )
: hazards_( guards, guard_count )
, sync_( 0 )
+# ifdef CDS_ENABLE_HPSTAT
+ , free_call_count_(0)
+ , scan_call_count_(0)
+ , help_scan_call_count_(0)
+# endif
{}
thread_data() = delete;
//@endcond
//@cond
- // Dynmic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
+ // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
class smr
{
struct thread_record;
assert( instance_ != nullptr );
# else
if ( !instance_ )
- CDS_THROW_EXCEPTION( not_initialized() );
+ CDS_THROW_EXCEPTION( not_initialized());
# endif
return *instance_;
}
static CDS_EXPORT_API void attach_thread();
static CDS_EXPORT_API void detach_thread();
+ /// Get internal statistics
+ CDS_EXPORT_API void statistics( stat& st );
+
public: // for internal use only
/// The main garbage collecting function
CDS_EXPORT_API void scan( thread_data* pRec );
%DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
+ @note Internally, %DHP depends on a free-list implementation. There are two:
+ the DCAS-based \p cds::intrusive::TaggedFreeList and the more complicated CAS-based
+ \p cds::intrusive::FreeList. For the x86 architecture and GCC/clang, libcds selects
+ the appropriate free-list based on the \p -mcx16 compiler flag. You may manually
+ disable DCAS support by specifying \p -DCDS_DISABLE_128BIT_ATOMIC for a 64-bit build
+ or \p -DCDS_DISABLE_64BIT_ATOMIC for a 32-bit build on the compiler command line.
+ All your projects and libcds MUST be compiled with the same flags: either with
+ DCAS support or without it. DCAS is not supported by the MS VC++ compiler.
+
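+ For example, a GCC command line for a 64-bit build with DCAS enabled might
+ look like this (the include path is hypothetical; adjust it to your setup):
+ \code
+ g++ -std=c++11 -mcx16 -I/path/to/libcds my_app.cpp -lcds
+ \endcode
+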
See \ref cds_how_to_use "How to use" section for details on how to apply SMR.
*/
class DHP
/// Atomic marked pointer
template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+ /// Internal statistics
+ typedef dhp::stat stat;
/// Dynamic Hazard Pointer guard
/**
public:
/// Default ctor allocates a guard (hazard pointer) from thread-private storage
Guard() CDS_NOEXCEPT
- : guard_( dhp::smr::tls()->hazards_.alloc() )
+ : guard_( dhp::smr::tls()->hazards_.alloc())
{}
/// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
template <typename T>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
T pRet;
do {
template <typename T, class Func>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
T pRet;
do {
template <typename T>
T * assign( size_t nIndex, T * p )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
guards_.set( nIndex, p );
dhp::smr::tls()->sync();
template <typename T>
T * get( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->template get_as<T>();
}
/// Get native guarded pointer stored
guarded_pointer get_native( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->get();
}
value_type * operator ->() const CDS_NOEXCEPT
{
assert( !empty());
- return value_cast()( guard_->get_as<guarded_type>() );
+ return value_cast()( guard_->get_as<guarded_type>());
}
/// Returns a reference to guarded value
value_type& operator *() CDS_NOEXCEPT
{
assert( !empty());
- return *value_cast()( guard_->get_as<guarded_type>() );
+ return *value_cast()( guard_->get_as<guarded_type>());
}
/// Returns const reference to guarded value
\p func is a disposer: when \p p can be safely removed, \p func is called.
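+
+ Example with a hypothetical \p Foo type; the disposer receives the pointer as \p void*:
+ \code
+ struct Foo { int data; };
+ void foo_disposer( void* p ) { delete static_cast<Foo*>( p ); }
+
+ cds::gc::DHP::retire( new Foo, foo_disposer );
+ \endcode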
*/
template <typename T>
- static void retire( T * p, void (* func)(T *))
+ static void retire( T * p, void (* func)(void *))
{
dhp::thread_data* rec = dhp::smr::tls();
- if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
+ if ( !rec->retired_.push( dhp::retired_ptr( p, func )))
dhp::smr::instance().scan( rec );
}
\endcode
*/
template <class Disposer, typename T>
- static void retire( T * p )
+ static void retire( T* p )
{
if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
scan();
*/
static void scan()
{
- dhp::smr::instance().scan( dhp::smr::tls() );
+ dhp::smr::instance().scan( dhp::smr::tls());
}
/// Synonym for \p scan()
{
scan();
}
+
+ /// Returns internal statistics
+ /**
+ The function clears \p st before gathering statistics.
+
+ @note Internal statistics are available only if you compile
+ \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
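+
+ A minimal usage sketch:
+ \code
+ cds::gc::DHP::stat st;
+ cds::gc::DHP::statistics( st );
+ printf( "DHP guards allocated: %zu\n", st.guard_allocated );
+ \endcode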
+ */
+ static void statistics( stat& st )
+ {
+ dhp::smr::instance().statistics( st );
+ }
+
+ /// Returns post-mortem statistics
+ /**
+ Post-mortem statistics are gathered in the \p %DHP object destructor
+ and remain accessible after the global \p %DHP object has been destroyed.
+
+ @note Internal statistics are available only if you compile
+ \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
+
+ Usage:
+ \code
+ int main()
+ {
+ cds::Initialize();
+ {
+ // Initialize DHP SMR
+ cds::gc::DHP dhp;
+
+ // deal with DHP-based data structures
+ // ...
+ }
+
+ // DHP object destroyed
+ // Get total post-mortem statistics
+ cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();
+
+ printf( "DHP statistics:\n"
+ " thread count = %llu\n"
+ " guard allocated = %llu\n"
+ " guard freed = %llu\n"
+ " retired data count = %llu\n"
+ " free data count = %llu\n"
+ " scan() call count = %llu\n"
+ " help_scan() call count = %llu\n",
+ st.thread_rec_count,
+ st.guard_allocated, st.guard_freed,
+ st.retired_count, st.free_count,
+ st.scan_count, st.help_scan_count
+ );
+
+ cds::Terminate();
+ }
+ \endcode
+ */
+ CDS_EXPORT_API static stat const& postmortem_statistics();
};
}} // namespace cds::gc