#ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H
#define __CDS_INTRUSIVE_SKIP_LIST_RCU_H

#include <type_traits>
#include <memory>
#include <functional>   // std::ref

#include <cds/intrusive/details/skip_list_base.h>
#include <cds/opt/compare.h>
#include <cds/urcu/details/check_deadlock.h>
#include <cds/details/binary_functor_wrapper.h>
#include <cds/urcu/exempt_ptr.h>
// bit 0 - the item is logically deleted
// bit 1 - the item is extracted (only for level 0)
typedef cds::details::marked_ptr<node, 3> marked_ptr ; ///< marked pointer
- typedef CDS_ATOMIC::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer
+ typedef atomics::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer
typedef atomic_marked_ptr tower_item_type;
protected:
# endif
protected:
unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
- atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL
+ atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
public:
/// Constructs a node of height 1 (a bottom-list node)
/// Resets the tower: clears next pointers at levels 1 .. m_nHeight-1
void clear_tower()
{
    // m_arrNext holds only levels 1 .. m_nHeight-1 (level 0 is stored separately),
    // so the loop starts at level 1. Relaxed order is enough: the tower is being
    // torn down and no other thread may observe these slots afterwards.
    for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
        next( nLevel ).store( marked_ptr(), atomics::memory_order_relaxed );
}
/// Access to element of next pointer array
/// Resets the node to its initial state
void clear()
{
    // Only a bottom-list node (height 1) may be cleared this way:
    // a node with a tower must dispose m_arrNext first.
    assert( m_arrNext == nullptr );
    m_pNext.store( marked_ptr(), atomics::memory_order_release );
    m_pDelChain = nullptr;
}
back_off bkoff;
for (;;) {
- if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
// Current node is marked as deleted. So, its next pointer can point to anything
// In this case we interrupt our iteration and returns end() iterator.
*this = iterator();
return;
}
- marked_ptr p = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed );
+ marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
node_type * pp = p.ptr();
if ( p.bits() ) {
// p is marked as deleted. Spin waiting for physical removal
bkoff();
continue;
}
- else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
+ else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
// p is marked as deleted. Spin waiting for physical removal
bkoff();
continue;
back_off bkoff;
for (;;) {
- marked_ptr p = refHead.next(0).load( CDS_ATOMIC::memory_order_relaxed );
+ marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
if ( !p.ptr() ) {
// empty skip-list
break;
node_type * pp = p.ptr();
// Logically deleted node is marked from highest level
- if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
m_pNode = pp;
break;
}
bool operator !=(iterator const& i ) const;
};
\endcode
Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
<b>How to use</b>
typedef typename options::stat stat ; ///< internal statistics type
typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
- static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
/// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
};
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
-
-# ifndef CDS_CXX11_LAMBDA_SUPPORT
- struct empty_insert_functor {
- void operator()( value_type& )
- {}
- };
-
- struct empty_erase_functor {
- void operator()( value_type const& )
- {}
- };
-
- struct empty_find_functor {
- template <typename Q>
- void operator()( value_type& item, Q& val )
- {}
- };
-
- struct get_functor {
- value_type * pFound;
-
- template <typename Q>
- void operator()( value_type& item, Q& val )
- {
- pFound = &item;
- }
- };
-
- template <typename Func>
- struct insert_at_ensure_functor {
- Func m_func;
- insert_at_ensure_functor( Func f ) : m_func(f) {}
-
- void operator()( value_type& item )
- {
- cds::unref( m_func)( true, item, item );
- }
- };
-
- struct copy_value_functor {
- template <typename Q>
- void operator()( Q& dest, value_type const& src ) const
- {
- dest = src;
- }
- };
-
-# endif // ifndef CDS_CXX11_LAMBDA_SUPPORT
-
//@endcond
protected:
item_counter m_ItemCounter ; ///< item counter
random_level_generator m_RandomLevelGen ; ///< random level generator instance
- CDS_ATOMIC::atomic<unsigned int> m_nHeight ; ///< estimated high level
- CDS_ATOMIC::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
+ atomics::atomic<unsigned int> m_nHeight ; ///< estimated high level
+ atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
mutable stat m_Stat ; ///< internal statistics
protected:
static void dispose_node( value_type * pVal )
{
- assert( pVal != NULL );
+ assert( pVal );
typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
disposer()( pVal );
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
# ifdef _DEBUG
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
# ifdef _DEBUG
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
# ifdef _DEBUG
{
marked_node_ptr p( pos.pSucc[0] );
pNode->next( 0 ).store( p, memory_model::memory_order_release );
- if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
return false;
}
# ifdef _DEBUG
pNode->m_bLinked = true;
# endif
- cds::unref( f )( val );
+ f( val );
}
for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
marked_node_ptr p;
while ( true ) {
marked_node_ptr q( pos.pSucc[ nLevel ]);
- if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
// pNode has been marked as removed while we are inserting it
// Stop inserting
assert( p.bits() );
return true;
}
p = q;
- if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
break;
// Renew insert position
pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
while ( true ) {
if ( pSucc.bits()
- || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
break;
}
return false;
int const nMask = bExtract ? 3 : 1;
- if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
- cds::unref(f)( *node_traits::to_value_ptr( pDel ));
+ f( *node_traits::to_value_ptr( pDel ));
// physical deletion
// try fast erase
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
+ memory_model::memory_order_release, atomics::memory_order_relaxed) )
{
// Do slow erase
find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
}
else if ( nCmp == 0 ) {
// found
- cds::unref(f)( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ f( *node_traits::to_value_ptr( pCur.ptr() ), val );
return find_fastpath_found;
}
else // pCur > val - go down
if ( find_position( val, pos, cmp, true )) {
assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
- cds::unref(f)( *node_traits::to_value_ptr( pos.pCur ), val );
+ f( *node_traits::to_value_ptr( pos.pCur ), val );
return true;
}
else
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos,
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
- [](value_type const&) {}
-# else
- empty_erase_functor()
-# endif
- , true ))
- {
+ if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractSuccess();
pDel = pos.pCur;
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos,
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
- [](value_type const&) {}
-# else
- empty_erase_functor()
-# endif
- , true ))
- {
+ if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMinSuccess();
pDel = pos.pCur;
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos,
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
- [](value_type const&) {}
-# else
- empty_erase_functor()
-# endif
- , true ))
- {
+ if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMaxSuccess();
{
unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
if ( nCur < nHeight )
- m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
class deferred_list_iterator
node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
do {
pTail->m_pDelChain = pDeferList;
- } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ));
pos.pDelChain = nullptr;
}
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
// Barrier for head node
- CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
}
/// Clears and destructs the skip-list
*/
bool insert( value_type& val )
{
    // Delegate to the functor-based overload with a no-op functor:
    // nothing to do on successful insertion beyond linking the node.
    return insert( val, []( value_type& ) {} );
}
/// Inserts new node
where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
\p val no any other changes could be made on this set's item by concurrent threads.
The user-defined functor is called only if the inserting is success and may be passed by reference
using \p std::ref.
RCU \p synchronize method can be called. RCU should not be locked.
*/
The functor can change non-key fields of the \p item; however, \p func must guarantee
that during changing no any other modifications could be made on this item by concurrent threads.
You can pass \p func argument by value or by reference using \p std::ref.
RCU \p synchronize method can be called. RCU should not be locked.
bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
-# ifndef CDS_CXX11_LAMBDA_SUPPORT
- insert_at_ensure_functor<Func> wrapper( func );
-# endif
-
rcu_lock rcuLock;
while ( true )
{
if ( !bTowerMade )
scp.release();
- cds::unref(func)( false, *node_traits::to_value_ptr(pos.pCur), val );
+ func( false, *node_traits::to_value_ptr(pos.pCur), val );
m_Stat.onEnsureExist();
break;
}
bTowerOk = true;
}
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
- if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { cds::unref(func)( true, item, item ); }))
-# else
- if ( !insert_at_position( val, pNode, pos, cds::ref(wrapper) ))
-# endif
- {
+ if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
m_Stat.onInsertRetry();
continue;
}
unsigned int nHeight = pDel->height();
- if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos,
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
- [](value_type const&) {}
-# else
- empty_erase_functor()
-# endif
- , false ))
- {
+ if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onUnlinkSuccess();
template <typename Q>
bool erase( const Q& val )
{
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
return do_erase( val, key_comparator(), [](value_type const&) {} );
-# else
- return do_erase( val, key_comparator(), empty_erase_functor() );
-# endif
}
/// Delete the item from the set with comparing functor \p pred
template <typename Q, typename Less>
bool erase_with( const Q& val, Less pred )
{
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
return do_erase( val, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
-# else
- return do_erase( val, cds::opt::details::make_comparator_from_less<Less>(), empty_erase_functor() );
-# endif
}
/// Deletes the item from the set
\endcode
where \p item is the item found, \p val is the <tt>find</tt> function argument.
You can pass \p f argument by value or by reference using \p std::ref.
The functor can change non-key fields of \p item. Note that the functor is only guarantee
that \p item cannot be disposed during functor is executing.
\endcode
where \p item is the item found, \p val is the <tt>find</tt> function argument.
You can pass \p f argument by value or by reference using \p std::ref.
The functor can change non-key fields of \p item. Note that the functor is only guarantee
that \p item cannot be disposed during functor is executing.
template <typename Q>
bool find( Q const& val )
{
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
return do_find_with( val, key_comparator(), [](value_type& , Q const& ) {} );
-# else
- return do_find_with( val, key_comparator(), empty_find_functor() );
-# endif
}
/// Finds the key \p val with comparing functor \p pred
template <typename Q, typename Less>
bool find_with( Q const& val, Less pred )
{
- return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(),
-# ifdef CDS_CXX11_LAMBDA_SUPPORT
- [](value_type& , Q const& ) {}
-# else
- empty_find_functor()
-# endif
- );
+ return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
}
/// Finds the key \p val and return the item found
/** \anchor cds_intrusive_SkipListSet_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
{
    assert( gc::is_locked());

    // pFound is set by the functor only when do_find_with() reports success;
    // initialized defensively so the not-found path never reads garbage.
    value_type * pFound = nullptr;
    return do_find_with( val, key_comparator(), [&pFound]( value_type& found, Q const& ) { pFound = &found; } )
        ? pFound : nullptr;
}
/// Finds the key \p val and return the item found
{
    assert( gc::is_locked());

    // Same pattern as get(): capture the found item via lambda, but compare
    // with a comparator built from the user-supplied \p Less predicate.
    value_type * pFound = nullptr;
    return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(),
        [&pFound]( value_type& found, Q const& ) { pFound = &found; } )
        ? pFound : nullptr;
}
/// Returns item count in the set