+ for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel )
+ pNode->next( nLevel ).store( marked_node_ptr(), memory_model::memory_order_relaxed );
+
+ // Insert at level 0
+ {
+ marked_node_ptr p( pos.pSucc[0] );
+ pNode->next( 0 ).store( p, memory_model::memory_order_relaxed );
+ if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ return false;
+
+ f( val );
+ }
+
+ // Insert at level 1..max
+ for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
+ marked_node_ptr p;
+ while ( true ) {
+ marked_node_ptr pSucc( pos.pSucc[nLevel] );
+
+ // Set pNode->next
+ // pNode->next can have a "logical deleted" flag if another thread is removing pNode right now
+ if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
+ {
+ // pNode has been marked as removed while we are inserting it
+ // Stop inserting
+ assert( p.bits() != 0 );
+ if ( pNode->level_unlinked( nHeight - nLevel ))
+ gc::retire( &val, dispose_node );
+ m_Stat.onLogicDeleteWhileInsert();
+ return true;
+ }
+ p = pSucc;
+
+ // Link pNode into the list at nLevel
+ if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ {
+ // go to next level
+ break;
+ }
+
+ // Renew insert position
+ m_Stat.onRenewInsertPosition();
+ if ( !find_position( val, pos, key_comparator(), false )) {
+ // The node has been deleted while we are inserting it
+ m_Stat.onNotFoundWhileInsert();
+ return true;
+ }
+ }
+ }
+ return true;
+ }
+
+ /// Removes node \p pDel from the skip list (lock-free, two phases).
+ ///
+ /// Phase 1 (logical deletion): marks the low bit of pDel->next at every
+ /// level from the top down to level 1, then competes on level 0; the thread
+ /// whose CAS marks level 0 owns the deletion and applies functor \p f to the
+ /// value exactly once.
+ /// Phase 2 (physical deletion): unlinks pDel from each predecessor in
+ /// pos.pPrev[], top level first.
+ ///
+ /// @param pDel  node to delete; must not be nullptr
+ /// @param pos   search position: pos.pPrev[i] is presumed to be pDel's
+ ///              predecessor at level i (filled by a prior find — confirm)
+ /// @param f     unary functor applied to the value of \p pDel by the winner
+ /// @return true if this thread logically deleted the node (f was called);
+ ///         false if another thread marked level 0 first
+ template <typename Func>
+ bool try_remove_at( node_type * pDel, position& pos, Func f )
+ {
+ assert( pDel != nullptr );
+
+ marked_node_ptr pSucc;
+ back_off bkoff;
+
+ // logical deletion (marking)
+ // Mark levels height-1 .. 1; level 0 is handled separately below because
+ // marking level 0 decides which thread wins the deletion.
+ for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
+ pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
+ if ( pSucc.bits() == 0 ) {
+ bkoff.reset();
+ // Retry until either our CAS sets the mark or we observe that some
+ // other thread has already marked this level (pSucc.bits() != 0).
+ while ( !( pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1,
+ memory_model::memory_order_release, atomics::memory_order_acquire )
+ || pSucc.bits() != 0 ))
+ {
+ bkoff();
+ m_Stat.onMarkFailed();
+ }
+ }
+ }
+
+ // Level 0: start from the current unmarked successor; the CAS below both
+ // marks the node and elects the deleting thread.
+ marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr());
+ while ( true ) {
+ if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_acquire ))
+ {
+ // We won: apply the functor exactly once before unlinking.
+ f( *node_traits::to_value_ptr( pDel ) );
+
+ // Physical deletion
+ // try fast erase
+ // Expected value for each predecessor CAS: an unmarked pointer to pDel.
+ p = pDel;
+
+ for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
+
+ pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
+ // Swing the predecessor past pDel (stripping the mark from pSucc).
+ // On success p is left unchanged, so it stays valid for the next level.
+ if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
+ {
+ pDel->level_unlinked();
+ }
+ else {
+ // Make slow erase
+ // A predecessor changed under us; re-search, which presumably
+ // helps unlink the marked node and eventually retires it —
+ // confirm against find_position(). Note: no gc::retire here.
+ find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
+ m_Stat.onSlowErase();
+ return true;
+ }
+ }
+
+ // Fast erasing success
+ // All levels unlinked by this thread; safe to hand the node to the GC.
+ gc::retire( node_traits::to_value_ptr( pDel ), dispose_node );
+ m_Stat.onFastErase();
+ return true;
+ }
+ else if ( p.bits()) {
+ // Another thread is deleting pDel right now
+ m_Stat.onEraseContention();
+ return false;
+ }
+ // CAS lost to a concurrent successor change (not a mark): back off and retry.
+ m_Stat.onEraseRetry();
+ bkoff();
+ }
+ }
+
+ /// Result of \p find_fastpath()
+ // NOTE(review): "finsd" looks like a typo for "find". The name is the
+ // return type of find_fastpath(), so renaming it must be coordinated with
+ // every use in this file (and any callers outside this chunk) — left as-is.
+ enum finsd_fastpath_result {
+ find_fastpath_found, ///< equal key found; the functor has been applied
+ find_fastpath_not_found, ///< key not present during this traversal
+ find_fastpath_abort ///< retries over nodes being removed exhausted; caller presumably falls back to find_slowpath() — confirm
+ };
+ /// Lock-free search that does not collect an insert/remove position.
+ ///
+ /// Walks from the highest level down to level 0, keeping two hazard-style
+ /// guards alive: slot 0 protects pPred, slot 1 protects pCur. If a marked
+ /// (logically deleted) successor is observed, the whole traversal restarts
+ /// from the head, up to 3 retries; after that it gives up with
+ /// \p find_fastpath_abort.
+ ///
+ /// @param val  search key
+ /// @param cmp  three-way comparator: cmp(node_value, val) < / == / > 0
+ /// @param f    binary functor f(found_value, val), called on match while
+ ///             the node is still guard-protected
+ /// @return find_fastpath_found / find_fastpath_not_found / find_fastpath_abort
+ template <typename Q, typename Compare, typename Func>
+ finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f )
+ {
+ node_type * pPred;
+ marked_node_ptr pCur;
+ marked_node_ptr pNull;
+
+ // guard array:
+ // 0 - pPred on level N
+ // 1 - pCur on level N
+ typename gc::template GuardArray<2> guards;
+ back_off bkoff;
+ unsigned attempt = 0;
+
+ try_again:
+ // Head node is the list anchor; presumably never retired, so it needs no
+ // guard — confirm against m_Head's lifetime.
+ pPred = m_Head.head();
+ for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
+ pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect );
+
+ // A null pCur means the level is exhausted: drop down a level,
+ // keeping the current pPred.
+ while ( pCur != pNull ) {
+ if ( pCur.bits() ) {
+ // pPred is being removed
+ // (the mark lives on pPred's next pointer) — restart from head.
+ if ( ++attempt < 4 ) {
+ bkoff();
+ goto try_again;
+ }
+
+ return find_fastpath_abort;
+ }
+
+ if ( pCur.ptr() ) {
+ int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
+ if ( nCmp < 0 ) {
+ // Advance: slot 1 -> slot 0 keeps pCur protected while it
+ // becomes the new pPred, then slot 1 takes its successor.
+ guards.copy( 0, 1 );
+ pPred = pCur.ptr();
+ pCur = guards.protect( 1, pCur->next( nLevel ), gc_protect );
+ }
+ else if ( nCmp == 0 ) {
+ // found
+ f( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ return find_fastpath_found;
+ }
+ else {
+ // pCur > val - go down
+ break;
+ }
+ }
+ }
+ }
+
+ return find_fastpath_not_found;
+ }
+
+ template <typename Q, typename Compare, typename Func>
+ bool find_slowpath( Q& val, Compare cmp, Func f )