class gc_common
{
public:
-# ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT
- template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
-# else
- template <typename MarkedPtr>
- class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
- {
- typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
- public:
-# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
- atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default;
-# else
- atomic_marked_ptr() CDS_NOEXCEPT_( noexcept(base_class()) )
- : base_class()
- {}
-# endif
- explicit CDS_CONSTEXPR atomic_marked_ptr(MarkedPtr val) CDS_NOEXCEPT_( noexcept(base_class( val )) )
- : base_class( val )
- {}
- explicit CDS_CONSTEXPR atomic_marked_ptr(typename MarkedPtr::value_type * p) CDS_NOEXCEPT_( noexcept(base_class( p )) )
- : base_class( p )
- {}
- };
-# endif
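+ /// Atomic marked pointer; with C++11 template aliases assumed, a plain alias for atomics::atomic<MarkedPtr>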
+ template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
};
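+ // Typical instantiation in a container node (illustrative only; node_type is hypothetical):
+ //     typedef cds::details::marked_ptr< node_type, 1 >              marked_ptr;
+ //     typedef typename gc::template atomic_marked_ptr< marked_ptr > atomic_marked_ptr;
+ //     atomic_marked_ptr m_pNext;   // next pointer carrying a deletion mark in its low bit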
//@endcond
template <typename ThreadData>
struct thread_list_record {
ThreadData * m_pNext ; ///< Next item in thread list
- CDS_ATOMIC::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
+ atomics::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; c_NullThreadId - the record is free (not owned)
thread_list_record()
- : m_pNext( null_ptr<ThreadData *>() )
- , m_idOwner( cds::OS::nullThreadId() )
+ : m_pNext( nullptr )
+ , m_idOwner( cds::OS::c_NullThreadId )
{}
~thread_list_record()
typedef cds::details::Allocator< thread_record, Alloc > allocator_type;
private:
- CDS_ATOMIC::atomic<thread_record *> m_pHead;
+ atomics::atomic<thread_record *> m_pHead;
public:
thread_list()
- : m_pHead( null_ptr<thread_record *>())
+ : m_pHead( nullptr )
{}
~thread_list()
thread_record * alloc()
{
thread_record * pRec;
- cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId();
- cds::OS::ThreadId const curThreadId = cds::OS::getCurrentThreadId();
+ cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
+ cds::OS::ThreadId const curThreadId = cds::OS::get_current_thread_id();
// First try to reuse a retired (non-active) HP record
- for ( pRec = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
+ for ( pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
cds::OS::ThreadId thId = nullThreadId;
- if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
continue;
return pRec;
}
// No records available for reuse
// Allocate and push a new record
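+ // The owner id is stored with relaxed ordering; the release fence below makes the
+ // fully initialized record visible before the CAS links it into the list head.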
pRec = allocator_type().New();
- pRec->m_list.m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
+ pRec->m_list.m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
- thread_record * pOldHead = m_pHead.load( CDS_ATOMIC::memory_order_acquire );
+ thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire );
do {
pRec->m_list.m_pNext = pOldHead;
- } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_release, atomics::memory_order_relaxed ));
return pRec;
}
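+ // retire(): return a record to the free pool. Clearing the owner id with release
+ // ordering lets another thread reclaim the record via the CAS in alloc().
+ // Illustrative pairing (hypothetical thread_list instance `list`, for exposition only):
+ //     thread_record * pRec = list.alloc();   // attach the current thread
+ //     /* ... use pRec ... */
+ //     list.retire( pRec );                   // detach; the record becomes reusable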
void retire( thread_record * pRec )
{
- assert( pRec != null_ptr<thread_record *>() );
- pRec->m_list.m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
+ assert( pRec != nullptr );
+ pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
}
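+ // detach_all(): walk the list and retire every record that is still owned by a thread.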
void detach_all()
{
- thread_record * pNext = null_ptr<thread_record *>();
- cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId();
+ thread_record * pNext = nullptr;
+ cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
- for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) {
+ for ( thread_record * pRec = m_pHead.load(atomics::memory_order_acquire); pRec; pRec = pNext ) {
pNext = pRec->m_list.m_pNext;
- if ( pRec->m_list.m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
+ if ( pRec->m_list.m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
retire( pRec );
}
}
}
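+ // head(): read the list head with the memory ordering requested by the caller.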
- thread_record * head( CDS_ATOMIC::memory_order mo ) const
+ thread_record * head( atomics::memory_order mo ) const
{
return m_pHead.load( mo );
}
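+ // destroy(): deallocate every record. The assert below expects each record to be
+ // unowned, owned by the calling (main) thread, or owned by a thread that is no longer alive.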
void destroy()
{
allocator_type al;
- CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId() ;)
- CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;)
+ CDS_DEBUG_ONLY( cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId; )
+ CDS_DEBUG_ONLY( cds::OS::ThreadId const mainThreadId = cds::OS::get_current_thread_id(); )
- thread_record * p = m_pHead.exchange( null_ptr<thread_record *>(), CDS_ATOMIC::memory_order_seq_cst );
+ thread_record * p = m_pHead.exchange( nullptr, atomics::memory_order_seq_cst );
while ( p ) {
thread_record * pNext = p->m_list.m_pNext;
- assert( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
- || p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
- || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) )
+ assert( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+ || p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+ || !cds::OS::is_thread_alive( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) )
);
al.Delete( p );