//$$CDS-header$$
-#ifndef __CDS_GC_DETAILS_DHP_H
-#define __CDS_GC_DETAILS_DHP_H
+#ifndef CDSLIB_GC_DETAILS_DHP_H
+#define CDSLIB_GC_DETAILS_DHP_H
#include <mutex> // unique_lock
-#include <cds/cxx11_atomic.h>
+#include <cds/algo/atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if CDS_COMPILER == CDS_COMPILER_MSVC
# pragma warning(push)
/// Retired pointer buffer node
struct retired_ptr_node {
retired_ptr m_ptr ; ///< retired pointer
- retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
- retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
+ atomics::atomic<retired_ptr_node *> m_pNext ; ///< next retired pointer in buffer
+ atomics::atomic<retired_ptr_node *> m_pNextFree ; ///< next item in free list of \p retired_ptr_node
};
/// Internal guard representation
{
cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
- atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
- atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
- SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
+ atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+ atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
+ cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
/*
Unfortunately, access to the list of free guard is lock-based.
details::guard_data * pGuard;
{
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
if ( pGuard )
m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
{
pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
}
pLast = p;
}
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
}
{
retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
do {
- node.m_pNext = pHead;
+ node.m_pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
} while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
}
+ /// Pushes [pFirst, pLast] list linked by pNext field.
+ size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
+ {
+ assert( pFirst );
+ assert( pLast );
+
+ retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
+ do {
+ pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
+ // pHead is changed by compare_exchange_weak
+ } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+
+ return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
+ }
+
/// Result of \ref dhp_gc_privatve "privatize" function.
/**
The \p privatize function returns retired node list as \p first and the size of that list as \p second.
/// Pool block
struct block {
- block * pNext ; ///< next block
- item items[m_nItemPerBlock] ; ///< item array
+ atomics::atomic<block *> pNext; ///< next block
+ item items[m_nItemPerBlock]; ///< item array
};
- atomics::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
+            atomics::atomic<block *> m_pBlockListHead; ///< head of allocated block list
// To solve ABA problem we use epoch-based approach
- static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
- atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
- atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
- atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
+ static const unsigned int c_nEpochCount = 4; ///< Max epoch count
+ atomics::atomic<unsigned int> m_nCurEpoch; ///< Current epoch
+ atomics::atomic<item *> m_pEpochFree[c_nEpochCount]; ///< List of free item per epoch
+ atomics::atomic<item *> m_pGlobalFreeHead; ///< Head of unallocated item list
cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
// link items within the block
item * pLastItem = pNew->items + m_nItemPerBlock - 1;
for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
- pItem->m_pNextFree = pItem + 1;
- CDS_STRICT_DO( pItem->m_pNext = nullptr );
+ pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
+ CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
}
- // link new block to block list
+ // links new block to the block list
{
- block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
+ block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
do {
- pNew->pNext = pHead;
+ pNew->pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
}
- // link block's items to free list
+ // links block's items to the free list
{
- item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
+ item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
do {
- pLastItem->m_pNextFree = pHead;
+ pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
// pHead is changed by compare_exchange_weak
} while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
{
block * p;
for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
- p = pBlock->pNext;
+ p = pBlock->pNext.load( atomics::memory_order_relaxed );
m_BlockAllocator.Delete( pBlock );
}
}
m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
}
- /// Allocates new retired pointer
+            /// Allocates a new retired pointer
retired_ptr_node& alloc()
{
unsigned int nEpoch;
pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
if ( !pItem )
goto retry;
- if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
+ if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
+ pItem->m_pNextFree.load(atomics::memory_order_acquire),
+ atomics::memory_order_acquire, atomics::memory_order_relaxed ))
+ {
goto success;
+ }
}
// Epoch free list is empty
// Alloc from global free list
retry:
- pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
+ pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
do {
if ( !pItem ) {
allocNewBlock();
goto retry;
}
// pItem is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
+ pItem->m_pNextFree.load(atomics::memory_order_acquire),
+ atomics::memory_order_acquire, atomics::memory_order_relaxed ));
success:
- CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
+ CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
return *pItem;
}
return node;
}
- /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
+ /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
/**
The list is linked on the m_pNextFree field
*/
item * pCurHead;
do {
pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
- pTail->m_pNextFree = pCurHead;
+ pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
} while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
};
/// Uninitialized guard
class guard
{
- friend class ThreadGC;
+ friend class dhp::ThreadGC;
protected:
details::guard_data * m_pGuard ; ///< Pointer to guard data
: m_pGuard( nullptr )
{}
- /// Ñopy-ctor is disabled
+ /// Copy-ctor is disabled
guard( guard const& ) = delete;
/// Move-ctor is disabled
public: // for ThreadGC.
/*
- GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
+ GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
                the compiler produces error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
despite the fact that ThreadGC is declared as friend for guard class.
Therefore, we have to add set_guard/get_guard public functions
{
typedef details::guard base_class;
friend class ThreadGC;
-
- ThreadGC& m_gc ; ///< ThreadGC object of current thread
public:
/// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
- Guard( ThreadGC& gc );
+ Guard(); // inline in dhp_impl.h
/// Returns guard allocated back to pool of free guards
- ~Guard(); // inline after GarbageCollector
-
- /// Returns DHP GC object
- ThreadGC& getGC() CDS_NOEXCEPT
- {
- return m_gc;
- }
+ ~Guard(); // inline in dhp_impl.h
/// Guards pointer \p p
template <typename T>
class GuardArray
{
details::guard m_arr[Count] ; ///< array of guard
- ThreadGC& m_gc ; ///< ThreadGC object of current thread
const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
public:
public:
/// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
- GuardArray( ThreadGC& gc ); // inline below
-
- /// The object is not default-constructible
- GuardArray() = delete;
+ GuardArray(); // inline in dhp_impl.h
/// The object is not copy-constructible
GuardArray( GuardArray const& ) = delete;
+ /// The object is not move-constructible
+ GuardArray( GuardArray&& ) = delete;
+
/// Returns guards allocated back to pool
- ~GuardArray(); // inline below
+            ~GuardArray(); // inline in dhp_impl.h
/// Returns the capacity of array
CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
return c_nCapacity;
}
- /// Returns DHP ThreadGC object
- ThreadGC& getGC() CDS_NOEXCEPT
- {
- return m_gc;
- }
-
/// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
{
public:
/// Exception "No GarbageCollector object is created"
- CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );
+ class not_initialized : public std::runtime_error
+ {
+ public:
+ //@cond
+ not_initialized()
+ : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
+ {}
+ //@endcond
+ };
/// Internal GC statistics
struct InternalState
size_t m_nGuardCount ; ///< Total guard count
size_t m_nFreeGuardCount ; ///< Count of free guard
+ //@cond
InternalState()
: m_nGuardCount(0)
, m_nFreeGuardCount(0)
return *this;
}
+ //@endcond
};
private:
/// Returns pointer to GarbageCollector instance
/**
- If DHP GC is not initialized, \p DHPManagerEmpty exception is thrown
+ If DHP GC is not initialized, \p not_initialized exception is thrown
*/
static GarbageCollector& instance()
{
if ( m_pManager == nullptr )
- throw DHPManagerEmpty();
+ throw not_initialized();
return *m_pManager;
}
m_gc.retirePtr( p, pFunc );
}
+ /// Run retiring cycle
void scan()
{
m_gc.scan();
}
};
-
- //////////////////////////////////////////////////////////
- // Inlines
-
- inline Guard::Guard(ThreadGC& gc)
- : m_gc( gc )
- {
- getGC().allocGuard( *this );
- }
- inline Guard::~Guard()
- {
- getGC().freeGuard( *this );
- }
-
- template <size_t Count>
- inline GuardArray<Count>::GuardArray( ThreadGC& gc )
- : m_gc( gc )
- {
- getGC().allocGuard( *this );
- }
- template <size_t Count>
- inline GuardArray<Count>::~GuardArray()
- {
- getGC().freeGuard( *this );
- }
-
} // namespace dhp
}} // namespace cds::gc
//@endcond
# pragma warning(pop)
#endif
-#endif // #ifndef __CDS_GC_DETAILS_DHP_H
+#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H