#ifndef __CDS_INTRUSIVE_SKIP_LIST_IMPL_H
#define __CDS_INTRUSIVE_SKIP_LIST_IMPL_H
+#include <type_traits>
+#include <memory>
#include <cds/intrusive/skip_list_base.h>
-#include <cds/details/std/type_traits.h>
-#include <cds/details/std/memory.h>
#include <cds/opt/compare.h>
#include <cds/ref.h>
#include <cds/details/binary_functor_wrapper.h>
back_off bkoff;
for (;;) {
- if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
// Current node is marked as deleted. So, its next pointer can point to anything
// In this case we interrupt our iteration and returns end() iterator.
*this = iterator();
bkoff();
continue;
}
- else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
+ else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
// p is marked as deleted. Spin waiting for physical removal
bkoff();
continue;
public: // for internal use only!!!
iterator( node_type& refHead )
- : m_pNode( null_ptr<node_type *>() )
+ : m_pNode( nullptr )
{
back_off bkoff;
node_type * pp = p.ptr();
// Logically deleted node is marked from highest level
- if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
m_pNode = pp;
break;
}
public:
iterator()
- : m_pNode( null_ptr<node_type *>())
+ : m_pNode( nullptr )
{}
iterator( iterator const& s)
value_type * operator ->() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return node_traits::to_value_ptr( m_pNode );
}
value_ref operator *() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return *node_traits::to_value_ptr( m_pNode );
}
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note: the iterator object returned by the \ref end and \p cend member functions points to \p nullptr and must not be dereferenced.
<b>How to use</b>
item_counter m_ItemCounter ; ///< item counter
random_level_generator m_RandomLevelGen ; ///< random level generator instance
- CDS_ATOMIC::atomic<unsigned int> m_nHeight ; ///< estimated high level
+ atomics::atomic<unsigned int> m_nHeight ; ///< estimated high level
mutable stat m_Stat ; ///< internal statistics
protected:
static void dispose_node( value_type * pVal )
{
- assert( pVal != null_ptr<value_type *>() );
+ assert( pVal != nullptr );
typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
disposer()( pVal );
}
goto retry;
}
- if ( pCur.ptr() == null_ptr<node_type *>()) {
+ if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
break;
}
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 )
gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
pos.pSucc[ nLevel ] = pCur.ptr();
}
- return (pos.pCur = pCur.ptr()) != null_ptr<node_type *>();
+ return (pos.pCur = pCur.ptr()) != nullptr;
}
bool find_max_position( position& pos )
goto retry;
}
- if ( pCur.ptr() == null_ptr<node_type *>()) {
+ if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
break;
}
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 )
gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
pos.pSucc[ nLevel ] = pCur.ptr();
}
- return (pos.pCur = pCur.ptr()) != null_ptr<node_type *>();
+ return (pos.pCur = pCur.ptr()) != nullptr;
}
template <typename Func>
{
marked_node_ptr p( pos.pSucc[0] );
pNode->next( 0 ).store( p, memory_model::memory_order_release );
- if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+ if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
return false;
}
cds::unref( f )( val );
marked_node_ptr p;
while ( true ) {
marked_node_ptr q( pos.pSucc[ nLevel ]);
- if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
// pNode has been marked as removed while we are inserting it
// Stop inserting
assert( p.bits() );
return true;
}
p = q;
- if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
break;
// Renew insert position
template <typename Func>
bool try_remove_at( node_type * pDel, position& pos, Func f )
{
- assert( pDel != null_ptr<node_type *>());
+ assert( pDel != nullptr );
marked_node_ptr pSucc;
typename gc::Guard gSucc;
while ( true ) {
pSucc = gSucc.protect( pDel->next(nLevel), gc_protect );
if ( pSucc.bits() || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1,
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
break;
}
pSucc = gSucc.protect( pDel->next(0), gc_protect );
marked_node_ptr p( pSucc.ptr() );
if ( pDel->next(0).compare_exchange_strong( p, marked_node_ptr(p.ptr(), 1),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
cds::unref(f)( *node_traits::to_value_ptr( pDel ));
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
pSucc = gSucc.protect( pDel->next(nLevel), gc_protect );
if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( p, marked_node_ptr(pSucc.ptr()),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
+ memory_model::memory_order_release, atomics::memory_order_relaxed) )
{
// Make slow erase
find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
{
unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
if ( nCur < nHeight )
- m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
//@endcond
gc::check_available_guards( c_nHazardPtrCount );
// Barrier for head node
- CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
}
/// Clears and destructs the skip-list
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
position pos;
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
# ifndef CDS_CXX11_LAMBDA_SUPPORT
/// Checks if the set is empty
bool empty() const
{
- return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr<node_type *>();
+ return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Clears the set (non-atomic)