/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
protected:
unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
- atomics::atomic<unsigned> m_nUnlink; ///< How many levels has been unlinked
+ atomics::atomic<unsigned> m_nUnlink; ///< Unlink helper: number of tower levels that have not yet been unlinked
public:
/// Constructs a node of height 1 (a bottom-list node)
- CDS_CONSTEXPR node()
+ node()
: m_pNext( nullptr )
, m_pDelChain( nullptr )
, m_nHeight(1)
, m_arrNext( nullptr )
- , m_nUnlink(0)
- {}
+ {
+ m_nUnlink.store( 1, atomics::memory_order_release );
+ }
/// Constructs a node of height \p nHeight
void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
m_arrNext = nextTower;
m_nHeight = nHeight;
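+ // all nHeight levels of the tower must be unlinked before the node can be disposed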
+ m_nUnlink.store( nHeight, atomics::memory_order_release );
}
atomic_marked_ptr * release_tower()
assert( nLevel < height());
assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr));
-# ifdef CDS_THREAD_SANITIZER_ENABLED
- // TSan false positive: m_arrNext is read-only array
- CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
- CDS_TSAN_ANNOTATE_IGNORE_READS_END;
- return r;
-# else
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
-# endif
}
/// Access to element of next pointer array (const version)
assert( nLevel < height());
assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr));
-# ifdef CDS_THREAD_SANITIZER_ENABLED
- // TSan false positive: m_arrNext is read-only array
- CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
- CDS_TSAN_ANNOTATE_IGNORE_READS_END;
- return r;
-# else
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
-# endif
}
/// Access to element of next pointer array (same as \ref next function)
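+ /// Marks \p nCount more levels of the tower as unlinked; returns \p true when the whole tower has been unlinked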
bool level_unlinked( unsigned nCount = 1 )
{
- return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height();
+ return m_nUnlink.fetch_sub( nCount, atomics::memory_order_relaxed ) == nCount;
+ }
+
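+ /// Returns \p true if \p nLevel is the topmost level of the tower that has not yet been unlinked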
+ bool is_upper_level( unsigned nLevel ) const
+ {
+ return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1;
}
};
} // namespace skip_list
protected:
skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
- item_counter m_ItemCounter; ///< item counter
random_level_generator m_RandomLevelGen; ///< random level generator instance
atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
+ item_counter m_ItemCounter; ///< item counter
mutable stat m_Stat; ///< internal statistics
protected:
void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
{
- marked_node_ptr p( pCur.ptr() );
- if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, atomics::memory_order_relaxed ) )
+ marked_node_ptr p( pCur.ptr());
+
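+ // Unlink pCur at nLevel only if nLevel is the topmost still-linked level of pCur's tower,
+ // so that towers are always unlinked from the top down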
+ if ( pCur->is_upper_level( nLevel )
+ && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( pCur->level_unlinked()) {
- if ( !is_extracted( pSucc ) ) {
+ if ( !is_extracted( pSucc )) {
// We cannot free the node at this moment because RCU is locked
// Link deleted nodes to a chain to free later
- pos.dispose( pCur.ptr() );
+ pos.dispose( pCur.ptr());
m_Stat.onEraseWhileFind();
}
else
template <typename Q, typename Compare >
bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
while ( true ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
goto retry;
}
else {
- nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 )
pPred = pCur.ptr();
else if ( nCmp == 0 && bStopIfFound )
bool find_min_position( position& pos )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
// head cannot be deleted
assert( pCur.bits() == 0 );
- if ( pCur.ptr() ) {
+ if ( pCur.ptr()) {
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
goto retry;
pos.pPrev[nLevel] = pPred;
pos.pSucc[nLevel] = pCur.ptr();
}
- return ( pos.pCur = pCur.ptr() ) != nullptr;
+ return ( pos.pCur = pCur.ptr()) != nullptr;
}
bool find_max_position( position& pos )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
while ( true ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
goto retry;
}
else {
- if ( !pSucc.ptr() )
+ if ( !pSucc.ptr())
break;
pPred = pCur.ptr();
pos.pSucc[nLevel] = pCur.ptr();
}
- return ( pos.pCur = pCur.ptr() ) != nullptr;
+ return ( pos.pCur = pCur.ptr()) != nullptr;
+ }
+
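+ // Rebuilds the insert position for \p val after a failed upper-level CAS.
+ // Returns \p true if the position was renewed, \p false if \p pNode has been marked or removed by a concurrent thread.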
+ bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
+ {
+ assert( gc::is_locked());
+
+ node_type * pPred;
+ marked_node_ptr pSucc;
+ marked_node_ptr pCur;
+ key_comparator cmp;
+ int nCmp = 1;
+
+ retry:
+ pPred = m_Head.head();
+
+ for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
+
+ while ( true ) {
+ pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
+ if ( pCur.bits()) {
+ // pCur.bits() means that pPred is logically deleted
+ goto retry;
+ }
+
+ if ( pCur.ptr() == nullptr ) {
+ // end of the list at level nLevel - goto next level
+ break;
+ }
+
+ // pSucc contains deletion mark for pCur
+ pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
+
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
+ goto retry;
+
+ if ( pSucc.bits()) {
+ // pCur is marked, i.e. logically deleted.
+ if ( pCur.ptr() == pNode ) {
+ // The node is being removed while we are inserting it
+ return false;
+ }
+
+ // try to help deleting pCur
+ help_remove( nLevel, pPred, pCur, pSucc, pos );
+ goto retry;
+ }
+ else {
+ nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
+ if ( nCmp < 0 )
+ pPred = pCur.ptr();
+ else
+ break;
+ }
+ }
+
+ // Next level
+ pos.pPrev[nLevel] = pPred;
+ pos.pSucc[nLevel] = pCur.ptr();
+ }
+
+ return nCmp == 0;
}
template <typename Func>
bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
unsigned int const nHeight = pNode->height();
pNode->clear_tower();
{
marked_node_ptr p( pos.pSucc[0] );
pNode->next( 0 ).store( p, memory_model::memory_order_relaxed );
- if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
+ if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
return false;
- }
f( val );
}
// Stop inserting
assert( p.bits() != 0 );
- if ( pNode->level_unlinked( nHeight - nLevel ) && p.bits() == 1 ) {
- pos.dispose( pNode );
- m_Stat.onEraseWhileInsert();
- }
- else
- m_Stat.onLogicDeleteWhileInsert();
+ // pNode is linked at level 0 at least, so level_unlinked() cannot return true here
+ CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
+
+ // pNode is linked up to nLevel - 1
+ // Remove it via find_position()
+ find_position( val, pos, key_comparator(), false );
+ m_Stat.onLogicDeleteWhileInsert();
return true;
}
p = pSucc;
// Renew insert position
m_Stat.onRenewInsertPosition();
- if ( !find_position( val, pos, key_comparator(), false ) ) {
+
+ if ( !renew_insert_position( val, pNode, pos )) {
// The node has been deleted while we are inserting it
- m_Stat.onNotFoundWhileInsert();
+ // Mark the never-linked upper levels as unlinked so concurrent removal can complete
+ CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
+
+ m_Stat.onRemoveWhileInsert();
+
+ // help removing val
+ find_position( val, pos, key_comparator(), false );
return true;
}
}
bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
{
assert( pDel != nullptr );
- assert( gc::is_locked() );
+ assert( gc::is_locked());
marked_node_ptr pSucc;
back_off bkoff;
}
}
- marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() );
+ marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr());
while ( true ) {
if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire ))
{
- f( *node_traits::to_value_ptr( pDel ) );
+ f( *node_traits::to_value_ptr( pDel ));
// physical deletion
// try fast erase
p = pDel;
- unsigned nCount = 0;
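+ // Unlink the tower level by level, from the top down.
+ // If a CAS fails, the predecessor has changed; fall back to the slow path via find_position()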
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
- pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
- if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
- memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
+ pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
+ if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+ memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
{
- // Do slow erase
- if ( nCount ) {
- if ( pDel->level_unlinked( nCount )) {
- if ( p.bits() == 1 ) {
- pos.dispose( pDel );
- m_Stat.onFastEraseHelped();
- }
- else
- m_Stat.onFastExtractHelped();
- return true;
- }
- }
-
+ pDel->level_unlinked();
+ }
+ else {
// Make slow erase
+# ifdef CDS_DEBUG
+ if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
+ assert( pDel != pos.pCur );
+# else
find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
+# endif
if ( bExtract )
m_Stat.onSlowExtract();
else
return true;
}
- ++nCount;
}
+ // Fast erase succeeded
if ( !bExtract ) {
// We cannot free the node at this moment since RCU is locked
// Link deleted nodes to a chain to free later
m_Stat.onFastExtract();
return true;
}
- else if ( p.bits() ) {
+ else if ( p.bits()) {
// Another thread is deleting pDel right now
m_Stat.onEraseContention();
return false;
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pCur == pNull )
- continue;
while ( pCur != pNull ) {
- if ( pCur.bits() ) {
- // pPrev is being removed
+ if ( pCur.bits()) {
+ // pPred is being removed
if ( ++attempt < 4 ) {
bkoff();
goto try_again;
return find_fastpath_abort;
}
- if ( pCur.ptr() ) {
- int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ if ( pCur.ptr()) {
+ int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 ) {
pPred = pCur.ptr();
pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
}
else if ( nCmp == 0 ) {
// found
- f( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ f( *node_traits::to_value_ptr( pCur.ptr()), val );
return find_fastpath_found;
}
else // pCur > val - go down
template <typename Q, typename Compare, typename Func>
bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
{
- if ( find_position( val, pos, cmp, true ) ) {
+ if ( find_position( val, pos, cmp, true )) {
assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
f( *node_traits::to_value_ptr( pos.pCur ), val );
{
rcu_lock l;
- switch ( find_fastpath( val, cmp, f ) ) {
+ switch ( find_fastpath( val, cmp, f )) {
case find_fastpath_found:
m_Stat.onFindFastSuccess();
return true;
break;
}
- if ( find_slowpath( val, cmp, f, pos ) ) {
+ if ( find_slowpath( val, cmp, f, pos )) {
m_Stat.onFindSlowSuccess();
bRet = true;
}
{
rcu_lock rcuLock;
- if ( !find_position( val, pos, cmp, false ) ) {
+ if ( !find_position( val, pos, cmp, false )) {
m_Stat.onEraseFailed();
bRet = false;
}
assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, f, false ) ) {
+ if ( try_remove_at( pDel, pos, f, false )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onEraseSuccess();
value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
{
// RCU should be locked!!!
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pDel;
- if ( !find_position( key, pos, cmp, false ) ) {
+ if ( !find_position( key, pos, cmp, false )) {
m_Stat.onExtractFailed();
pDel = nullptr;
}
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractSuccess();
value_type * do_extract_min()
{
- assert( !gc::is_locked() );
+ assert( !gc::is_locked());
position pos;
node_type * pDel;
{
rcu_lock l;
- if ( !find_min_position( pos ) ) {
+ if ( !find_min_position( pos )) {
m_Stat.onExtractMinFailed();
pDel = nullptr;
}
pDel = pos.pCur;
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMinSuccess();
value_type * do_extract_max()
{
- assert( !gc::is_locked() );
+ assert( !gc::is_locked());
position pos;
node_type * pDel;
{
rcu_lock l;
- if ( !find_max_position( pos ) ) {
+ if ( !find_max_position( pos )) {
m_Stat.onExtractMaxFailed();
pDel = nullptr;
}
pDel = pos.pCur;
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMaxSuccess();
node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
while ( p ) {
node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
- dispose_node( node_traits::to_value_ptr( p ) );
+ dispose_node( node_traits::to_value_ptr( p ));
p = pNext;
}
}