//$$CDS-header$$
-#ifndef __CDS_GC_DETAILS_DHP_H
-#define __CDS_GC_DETAILS_DHP_H
+#ifndef CDSLIB_GC_DETAILS_DHP_H
+#define CDSLIB_GC_DETAILS_DHP_H
#include <mutex> // unique_lock
#include <cds/algo/atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if CDS_COMPILER == CDS_COMPILER_MSVC
# pragma warning(push)
/// Retired pointer buffer node
struct retired_ptr_node {
retired_ptr m_ptr ; ///< retired pointer
- retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
- retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
+ atomics::atomic<retired_ptr_node *> m_pNext ; ///< next retired pointer in buffer
+ atomics::atomic<retired_ptr_node *> m_pNextFree ; ///< next item in free list of \p retired_ptr_node
};
/// Internal guard representation
{
cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
- atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
- atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
- SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
+ atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+ atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
+ cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
/*
Unfortunately, access to the list of free guards is lock-based.
details::guard_data * pGuard;
{
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
if ( pGuard )
m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
{
pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
}
pLast = p;
}
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
}
{
retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
do {
- node.m_pNext = pHead;
+ node.m_pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
} while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
do {
- pLast->m_pNext = pHead;
+ pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
} while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
/// Pool block
struct block {
- block * pNext; ///< next block
- item items[m_nItemPerBlock]; ///< item array
+ atomics::atomic<block *> pNext; ///< next block
+ item items[m_nItemPerBlock]; ///< item array
};
atomics::atomic<block *> m_pBlockListHead; ///< head of allocated block list
// link items within the block
item * pLastItem = pNew->items + m_nItemPerBlock - 1;
for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
- pItem->m_pNextFree = pItem + 1;
- CDS_STRICT_DO( pItem->m_pNext = nullptr );
+ pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
+ CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
}
// links new block to the block list
{
- block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
+ block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
do {
- pNew->pNext = pHead;
+ pNew->pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
}
// links block's items to the free list
{
- item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
+ item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
do {
- pLastItem->m_pNextFree = pHead;
+ pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
// pHead is changed by compare_exchange_weak
} while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
{
block * p;
for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
- p = pBlock->pNext;
+ p = pBlock->pNext.load( atomics::memory_order_relaxed );
m_BlockAllocator.Delete( pBlock );
}
}
pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
if ( !pItem )
goto retry;
- if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
+ if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
+ pItem->m_pNextFree.load(atomics::memory_order_acquire),
+ atomics::memory_order_acquire, atomics::memory_order_relaxed ))
+ {
goto success;
+ }
}
// Epoch free list is empty
// Alloc from global free list
retry:
- pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
+ pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
do {
if ( !pItem ) {
allocNewBlock();
goto retry;
}
// pItem is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
+ pItem->m_pNextFree.load(atomics::memory_order_acquire),
+ atomics::memory_order_acquire, atomics::memory_order_relaxed ));
success:
- CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
+ CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
return *pItem;
}
item * pCurHead;
do {
pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
- pTail->m_pNextFree = pCurHead;
+ pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
} while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
};
: m_pGuard( nullptr )
{}
- /// Ñopy-ctor is disabled
+ /// Copy-ctor is disabled
guard( guard const& ) = delete;
/// Move-ctor is disabled
public: // for ThreadGC.
/*
- GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
+ GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
the compiler produces error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
despite the fact that ThreadGC is declared as friend for guard class.
Therefore, we have to add set_guard/get_guard public functions
size_t m_nGuardCount ; ///< Total guard count
size_t m_nFreeGuardCount ; ///< Count of free guards
+ //@cond
InternalState()
: m_nGuardCount(0)
, m_nFreeGuardCount(0)
return *this;
}
+ //@endcond
};
private:
m_gc.retirePtr( p, pFunc );
}
+ /// Run retiring cycle
void scan()
{
m_gc.scan();
# pragma warning(pop)
#endif
-#endif // #ifndef __CDS_GC_DETAILS_DHP_H
+#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H