atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container
node()
- : m_pNext( nullptr )
- {}
+ {
+ m_pNext.store( marked_ptr(), atomics::memory_order_release );
+ }
};
using cds::intrusive::single_link::default_hook;
while ( true ) {
t = guard.protect( m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());});
- marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire );
+ marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_relaxed );
if ( pNext.ptr() == nullptr ) {
pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed );
if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
- if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_acquire ))
+ if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ))
m_Stat.onAdvanceTailFailed();
break;
}
pNext = gNext.protect( t->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());});
// add to the basket
- if ( m_pTail.load(memory_model::memory_order_acquire) == t
+ if ( m_pTail.load( memory_model::memory_order_relaxed ) == t
&& t->m_pNext.load( memory_model::memory_order_relaxed) == pNext
&& !pNext.bits())
{
typename gc::template GuardArray<2> g;
g.assign( 0, node_traits::to_value_ptr( pNext.ptr()));
if ( m_pTail.load( memory_model::memory_order_acquire ) != t
-
|| t->m_pNext.load( memory_model::memory_order_relaxed ) != pNext )
-
{
m_Stat.onEnqueueRace();
bkoff();
marked_ptr p;
bool bTailOk = true;
- while ( (p = pNext->m_pNext.load( memory_model::memory_order_relaxed )).ptr() != nullptr )
+ while ( (p = pNext->m_pNext.load( memory_model::memory_order_acquire )).ptr() != nullptr )
{
- bTailOk = m_pTail.load( memory_model::memory_order_acquire ) == t;
+ bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t;
if ( !bTailOk )
break;
g.assign( 1, node_traits::to_value_ptr( p.ptr()));
- if ( pNext->m_pNext.load(memory_model::memory_order_acquire) != p )
+ if ( pNext->m_pNext.load( memory_model::memory_order_relaxed ) != p )
continue;
pNext = p;
g.assign( 0, g.template get<value_type>( 1 ));
atomic_node_ptr m_pPrev ; ///< Pointer to previous node
/// Default ctor: initializes both intrusive links (next and prev) to nullptr.
CDS_CONSTEXPR node() CDS_NOEXCEPT
{
    // Diff residue resolved: the removed init-list form and the added
    // explicit-store form were both present; keep the added (post-patch)
    // version that stores through the atomics explicitly.
    m_pNext.store( nullptr, atomics::memory_order_relaxed );
    // NOTE(review): release on m_pPrev mirrors the patch line; confirm
    // whether relaxed is sufficient, given the node is not yet linked
    // into the list when the constructor runs.
    m_pPrev.store( nullptr, atomics::memory_order_release );
}
};
//@cond
back_off bkoff;
guards.assign( 1, &val );
- node_type * pTail = guards.protect( 0, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);} ); // Read the tail
while( true ) {
+ node_type * pTail = guards.protect( 0, m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p ); } ); // Read the tail
pNew->m_pNext.store( pTail, memory_model::memory_order_relaxed );
if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { // Try to CAS the tail
pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev
m_Stat.onEnqueue();
break; // Enqueue done!
}
- guards.assign( 0, node_traits::to_value_ptr( pTail )); // pTail has been changed by CAS above
m_Stat.onEnqueueRace();
bkoff();
}