typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
/// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
- static bool const c_bRelaxedInsert = traits::relaxed_insert;
+ static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
- /// Pointer to removed value object
- typedef std::unique_ptr< mapped_type, disposer > exempt_ptr;
+ /// Returned pointer to \p mapped_type of extracted node
+ typedef std::unique_ptr< T, disposer > unique_ptr;
+
+ typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
protected:
//@cond
- typedef typename sync_monitor::template node_injection< bronson_avltree::node< key_type, mapped_type >> node_type;
+ typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
typedef typename node_type::version_type version_type;
typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
retry
};
- enum class update_flags
+ /// Bit flags passed to \p do_update() (allow_*) and returned from it (failed / retry / result_*).
+ /// A plain enum inside a struct (not enum class) so flags can be OR-ed and compared as int.
+ struct update_flags
{
- allow_insert = 1,
- allow_update = 2,
- allow_remove = 4,
+ enum {
+ allow_insert = 1,
+ allow_update = 2,
+ //allow_remove = 4,
- retry = 1024,
+ retry = 1024,
- failed = 0,
- result_inserted = allow_insert,
- result_updated = allow_update,
- result_removed = allow_remove
+ failed = 0,
+ result_inserted = allow_insert,
+ result_updated = allow_update,
+ result_removed = 4
+ };
};
enum node_condition
unlink_required = -1
};
+ // Child-slot selector for node_type::child( nDir ): negative = left, positive = right.
+ enum direction {
+ left_child = -1,
+ right_child = 1
+ };
+
typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
//@endcond
cxx_allocator().Delete( pNode );
}
- // RCU safe disposer
+ // Disposes the value \p pVal via the map's disposer functor.
+ // mapped_type is a pointer type here; caller must ensure pVal is no longer reachable.
+ static void free_value( mapped_type pVal )
+ {
+ disposer()(pVal);
+ }
+
+ // Loads the child of \p pNode selected by \p nDir (see enum direction) with the given memory order.
+ static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
+ {
+ return pNode->child( nDir ).load( order );
+ }
+
+ // Loads the parent pointer of \p pNode with the given memory order.
+ static node_type * parent( node_type * pNode, atomics::memory_order order )
+ {
+ return pNode->m_pParent.load( order );
+ }
+
+ // RCU safe disposer for node
class rcu_disposer
{
node_type * m_pRetiredList; ///< head of retired list
void dispose( node_type * pNode )
{
- mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
- if ( pVal ) {
- pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
- disposer()(pVal);
- }
-
+ assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
pNode->m_pNextRemoved = m_pRetiredList;
m_pRetiredList = pNode;
}
assert( !gc::is_locked() );
for ( node_type * p = m_pRetiredList; p; ) {
- node_type * pNext = p->m_pNextRemoved;
+ node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
// Value already disposed
gc::template retire_ptr<internal_disposer>( p );
p = pNext;
protected:
//@cond
- typename node_type::base_class m_Root;
- node_type * m_pRoot;
- item_counter m_ItemCounter;
- mutable sync_monitor m_Monitor;
- mutable stat m_stat;
+ typename node_type::base_class m_Root;
+ node_type * m_pRoot;
+ item_counter m_ItemCounter;
+ mutable sync_monitor m_Monitor;
+ mutable stat m_stat;
//@endcond
public:
/// Creates empty map
BronsonAVLTreeMap()
- : m_pRoot( static_cast<node_type *>(&m_Root) )
+ : m_pRoot( static_cast<node_type *>( &m_Root ))
{}
/// Destroys the map
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K>
- bool insert( K const& key, mapped_type * pVal )
+ bool insert( K const& key, mapped_type pVal )
{
return do_update(key, key_comparator(),
- [pVal]( node_type * pNode ) -> mapped_type*
+ [pVal]( node_type * pNode ) -> mapped_type
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
return pVal;
},
- update_flags::allow_insert
+ update_flags::allow_insert
) == update_flags::result_inserted;
}
already is in the tree.
*/
template <typename K, typename Func>
- std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
+ std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
{
int result = do_update( key, key_comparator(),
- [pVal]( node_type * ) -> mapped_type*
+ [pVal]( node_type * ) -> mapped_type
{
return pVal;
},
template <typename K>
bool erase( K const& key )
{
- return do_update(
- key,
- key_comparator(),
- []( mapped_type * pVal ) { disposer()(pVal); },
- update_flags::allow_remove
- ) == update_flags::result_removed;
+ return do_remove(
+ key,
+ key_comparator(),
+ []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
+ );
}
/// Deletes the item from the map using \p pred predicate for searching
bool erase_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- return do_update(
+ return do_remove(
key,
cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
- []( mapped_type * pVal ) { disposer()(pVal); },
- update_flags::allow_remove
- ) == update_flags::result_removed;
+ []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
+ );
}
/// Delete \p key from the map
template <typename K, typename Func>
bool erase( K const& key, Func f )
{
- return do_update(
+ return do_remove(
key,
key_comparator(),
- [&f]( mapped_type * pVal ) { f( *pVal ); disposer()(pVal); },
- update_flags::allow_remove
- ) == update_flags::result_removed;
+ [&f]( mapped_type pVal ) -> bool {
+ assert( pVal );
+ f( *pVal );
+ free_value(pVal);
+ return true;
+ }
+ );
}
/// Deletes the item from the map using \p pred predicate for searching
bool erase_with( K const& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return do_update(
+ return do_remove(
key,
cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
- [&f]( mapped_type * pVal ) { f( *pVal ); disposer()(pVal); },
- update_flags::allow_remove
- ) == update_flags::result_removed;
+ [&f]( mapped_type pVal ) -> bool {
+ assert( pVal );
+ f( *pVal );
+ free_value(pVal);
+ return true;
+ }
+ );
}
/// Extracts an item with minimal key from the map
/**
- Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
- If the set is empty, returns empty \p exempt_ptr.
+ Returns \p std::unique_ptr to the leftmost item.
+ If the set is empty, returns empty \p std::unique_ptr.
@note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
It means that the function gets leftmost leaf of the tree and tries to unlink it.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
- its \p release() member function is called.
+ its \p reset(nullptr) member function is called.
*/
- exempt_ptr extract_min()
+ unique_ptr extract_min()
{
//TODO
+ return unique_ptr();
}
/// Extracts an item with maximal key from the map
/**
- Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
- If the set is empty, returns empty \p exempt_ptr.
+ Returns \p std::unique_ptr pointer to the rightmost item.
+ If the set is empty, returns empty \p std::unique_ptr.
@note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
- its \p release() is called.
+ its \p reset(nullptr) is called.
@note Before reusing \p result object you should call its \p release() method.
*/
- exempt_ptr extract_max()
+ unique_ptr extract_max()
{
//TODO
+ return unique_ptr();
}
/// Extracts an item from the map
/**
The function searches an item with key equal to \p key in the tree,
- unlinks it, and returns \p exempt_ptr pointer to a value found.
- If \p key is not found the function returns an empty \p exempt_ptr.
+ unlinks it, and returns \p std::unique_ptr pointer to a value found.
+ If \p key is not found the function returns an empty \p std::unique_ptr.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not destroy the value found.
its \p reset(nullptr) member function is called.
*/
template <typename Q>
- exempt_ptr extract( Q const& key )
+ unique_ptr extract( Q const& key )
{
- exempt_ptr pExtracted;
+ unique_ptr pExtracted;
- do_update(
+ do_remove(
key,
key_comparator(),
- [&pExtracted]( mapped_type * pVal ) { pExtracted.reset( pVal ); },
- update_flags::allow_remove
- );
+ [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
+ );
return pExtracted;
}
\p pred must imply the same element order as the comparator used for building the tree.
*/
template <typename Q, typename Less>
- exempt_ptr extract_with( Q const& key, Less pred )
+ unique_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- exempt_ptr pExtracted;
+ unique_ptr pExtracted;
- do_update(
+ do_remove(
key,
cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
- [&pExtracted]( mapped_type * pVal ) { pExtracted.reset( pVal ); },
- update_flags::allow_remove
- );
+ [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
+ );
return pExtracted;
}
The interface of \p Func functor is:
\code
struct functor {
- void operator()( mapped_type& item );
+ void operator()( key_type const& key, mapped_type& item );
};
\endcode
where \p item is the item found.
template <typename K, typename Func>
bool find( K const& key, Func f )
{
- return do_find( key, key_comparator(), [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
- assert( pNode != nullptr );
- node_scoped_lock l( monitor, *pNode );
- mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
- if ( pVal ) {
- f( *pVal );
- return true;
+ return do_find( key, key_comparator(),
+ [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
+ assert( pNode != nullptr );
+ node_scoped_lock l( monitor, *pNode );
+ mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
+ if ( pVal ) {
+ f( pNode->m_key, *pVal );
+ return true;
+ }
+ return false;
}
- return false;
- });
+ );
}
/// Finds the key \p val using \p pred predicate for searching
[&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
assert( pNode != nullptr );
node_scoped_lock l( monitor, *pNode );
- mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
+ mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
if ( pVal ) {
- f( *pVal );
+ f( pNode->m_key, *pVal );
return true;
}
return false;
template <typename K>
bool find( K const& key )
{
- return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
+ return do_find( key, key_comparator(), []( sync_monitor&, node_type * ) -> bool { return true; });
}
/// Finds the key \p val using \p pred predicate for searching
bool find_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
- }
-
- /// Finds \p key and return the item found
- /**
- The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p nullptr.
-
- RCU should be locked before call the function.
- Returned pointer is valid while RCU is locked.
- */
- template <typename Q>
- mapped_type * get( Q const& key ) const
- {
- //TODO
- }
-
- /// Finds \p key with \p pred predicate and return the item found
- /**
- The function is an analog of \p get(Q const&)
- but \p pred is used for comparing the keys.
-
- \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
- and \p Q in any order.
- \p pred must imply the same element order as the comparator used for building the map.
- */
- template <typename Q, typename Less>
- mapped_type * get_with( Q const& key, Less pred ) const
- {
- CDS_UNUSED( pred );
- //TODO
+ return do_find( key, opt::details::make_comparator_from_less<Less>(), []( sync_monitor&, node_type * ) -> bool { return true; } );
}
/// Clears the tree (thread safe, not atomic)
*/
void clear()
{
- for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
- ep.release();
+ while ( extract_min() );
}
/// Clears the tree (not thread safe)
bool check_consistency() const
{
//TODO
+ return true;
}
protected:
rcu_disposer removed_list;
{
rcu_lock l;
- return try_udate_root( key, cmp, nFlags, funcUpdate, removed_list );
+ return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
+ }
+ }
+
+ // Removes the item with key \p key under an RCU read-side lock.
+ // \p cmp is the key comparator; \p func( mapped_type ) consumes the value and
+ // must return true if it disposed the value, false if it extracted (kept) it.
+ // Returns true iff an item was unlinked (see try_remove_root).
+ template <typename K, typename Compare, typename Func>
+ bool do_remove( K const& key, Compare cmp, Func func )
+ {
+ // Func must return true if the value was disposed
+ // or false if the value was extracted
+
+ check_deadlock_policy::check();
+
+ rcu_disposer removed_list;
+ {
+ rcu_lock l;
+ return try_remove_root( key, cmp, func, removed_list );
+ }
+ }
assert( pNode );
while ( true ) {
- node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
+ node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
if ( !pChild ) {
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
m_stat.onFindRetry();
if ( nCmp == 0 ) {
if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
// key found
- node_scoped_lock l( m_Monitor, pChild );
+ node_scoped_lock l( m_Monitor, *pChild );
if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
- if ( f( *pChild ) ) {
+ if ( f( m_Monitor, pChild ) ) {
m_stat.onFindSuccess();
return find_result::found;
}
}
template <typename K, typename Compare, typename Func>
- int try_update_root( K const* key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
+ int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
{
assert( gc::is_locked() );
int result;
do {
// get right child of root
- node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
+ node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
if ( pChild ) {
version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
if ( nChildVersion & node_type::shrinking ) {
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
result = update_flags::retry;
}
- else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
+ else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
}
}
// insert into tree as right child of the root
{
node_scoped_lock l( m_Monitor, *m_pRoot );
+ // Another thread linked a right child of the root while we were acquiring
+ // the root lock: the empty-tree insert path is no longer valid, retry.
+ if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
+ result = update_flags::retry;
+ continue;
+ }
node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
- mapped_type * pVal = funcUpdate( pNew );
+ mapped_type pVal = funcUpdate( pNew );
assert( pVal != nullptr );
pNew->m_pValue.store( pVal, memory_model::memory_order_release );
- m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
+ m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
m_pRoot->height( 2, memory_model::memory_order_relaxed );
}
return result;
}
+ // Removal entry point: dispatches try_remove() on the right child of the
+ // sentinel root, retrying while the child is shrinking or the attempt
+ // reports update_flags::retry. Returns true iff a node was removed.
+ // Must be called inside an RCU read-side critical section.
+ template <typename K, typename Compare, typename Func>
+ bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
+ {
+ assert( gc::is_locked() );
+
+ int result;
+ do {
+ // get right child of root
+ node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
+ if ( pChild ) {
+ version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onUpdateRootWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ result = update_flags::retry;
+ }
+ else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
+ result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
+ }
+ // NOTE(review): if pChild changed between the two reads, neither branch
+ // assigns result and the loop condition reads it uninitialized on the
+ // first iteration — confirm an explicit result = update_flags::retry
+ // is intended here.
+ }
+ else
+ return false;
+ } while ( result == update_flags::retry );
+
+ return result == update_flags::result_removed;
+ }
+
template <typename K, typename Compare, typename Func>
int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
+ CDS_UNUSED( pParent );
int nCmp = cmp( key, pNode->m_key );
if ( nCmp == 0 ) {
if ( nFlags & update_flags::allow_update ) {
return try_update_node( funcUpdate, pNode );
}
- if ( nFlags & update_flags::allow_remove ) {
- return try_remove_node( pParent, pNode, nVersion, funcUpdate, disp );
- }
return update_flags::failed;
}
int result;
do {
- node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
+ node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return update_flags::retry;
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
// retry
}
- else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
+ else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
return result;
}
+ // Recursive hand-over-hand removal: descends from pNode (whose version snapshot
+ // is nVersion, taken by the caller while pNode was a child of pParent) toward
+ // the key, validating versions at each step. Returns one of update_flags:
+ // failed (key absent), retry (caller must restart), or result_removed.
+ template <typename K, typename Compare, typename Func>
+ int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+ {
+ assert( gc::is_locked() );
+ assert( nVersion != node_type::unlinked );
+
+ int nCmp = cmp( key, pNode->m_key );
+ if ( nCmp == 0 )
+ return try_remove_node( pParent, pNode, nVersion, func, disp );
+
+ int result;
+ do {
+ node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
+ return update_flags::retry;
+
+ if ( pChild == nullptr ) {
+ // no subtree on the search side - the key is not in the tree
+ return update_flags::failed;
+ }
+ else {
+ // update child
+ result = update_flags::retry;
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onUpdateWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ // retry
+ }
+ else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
+ // this second read is important, because it is protected by nChildVersion
+
+ // validate the read that our caller took to get to node
+ if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onUpdateRetry();
+ return update_flags::retry;
+ }
+
+ // At this point we know that the traversal our parent took to get to node is still valid.
+ // The recursive implementation will validate the traversal from node to
+ // child, so just prior to the node nVersion validation both traversals were definitely okay.
+ // This means that we are no longer vulnerable to node shrinks, and we don't need
+ // to validate node version any more.
+ result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
+ }
+ }
+ } while ( result == update_flags::retry );
+ return result;
+ }
+
template <typename K, typename Func>
int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
{
node_type * pNew;
auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
- mapped_type * pVal = funcUpdate( pNew );
+ mapped_type pVal = funcUpdate( pNew );
assert( pVal != nullptr );
pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
};
if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
|| pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
{
- if ( c_RelaxedInsert ) {
+ if ( c_bRelaxedInsert ) {
free_node( pNew );
m_stat.onRelaxedInsertFailed();
}
pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
pDamaged = fix_height_locked( pNode );
}
+
+ ++m_ItemCounter;
m_stat.onInsertSuccess();
fix_height_and_rebalance( pDamaged, disp );
template <typename Func>
int try_update_node( Func funcUpdate, node_type * pNode )
{
- mapped_type * pOld;
+ mapped_type pOld;
{
assert( pNode != nullptr );
node_scoped_lock l( m_Monitor, *pNode );
}
pOld = pNode->value( memory_model::memory_order_relaxed );
- mapped_type * pVal = funcUpdate( *pNode );
+ mapped_type pVal = funcUpdate( pNode );
assert( pVal != nullptr );
- pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed ));
+ pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
}
if ( pOld ) {
- disposer()(pOld);
+ free_value(pOld);
m_stat.onDisposeValue();
}
if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
return update_flags::failed;
- if ( pNode->child( -1, atomics::memory_order_relaxed ) == nullptr
- || pNode->child( 1, atomics::memory_order_relaxed ) == nullptr )
+ if ( pNode->child( left_child ).load( atomics::memory_order_relaxed ) == nullptr
+ || pNode->child( right_child ).load( atomics::memory_order_relaxed ) == nullptr )
{
node_type * pDamaged;
- mapped_type * pOld;
+ mapped_type pOld;
{
node_scoped_lock lp( m_Monitor, *pParent );
- if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || pNode->m_pParent.load( atomics::memory_order_relaxed ) != pParent )
+ if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
return update_flags::retry;
{
node_scoped_lock ln( m_Monitor, *pNode );
- pOld = pNode->value( atomics::memory_order_relaxed );
- if ( pNode->version( atomics::memory_order_relaxed ) == nVersion
+ pOld = pNode->value( memory_model::memory_order_relaxed );
+ if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
&& pOld
- && try_unlink_locked( pParent, pNode, disp ) )
+ && try_unlink_locked( pParent, pNode, disp )))
{
- pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
- }
- else
return update_flags::retry;
+ }
}
pDamaged = fix_height_locked( pParent );
}
- func( pOld ); // calls pOld disposer inside
- m_stat.onDisposeValue();
+ --m_ItemCounter;
+ if ( func( pOld ) ) // calls pOld disposer inside
+ m_stat.onDisposeValue();
+ else
+ m_stat.onExtractValue();
fix_height_and_rebalance( pDamaged, disp );
- return upfate_flags::result_removed;
+ return update_flags::result_removed;
}
else {
int result = update_flags::retry;
+ mapped_type pOld;
{
node_scoped_lock ln( m_Monitor, *pNode );
- mapped_type * pOld = pNode->value( atomics::memory_order_relaxed );
+ pOld = pNode->value( atomics::memory_order_relaxed );
if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
- result = upfate_flags::result_removed;
+ result = update_flags::result_removed;
}
}
- if ( result == upfate_flags::result_removed ) {
- func( *pOld ); // calls pOld disposer inside
- m_stat.onDisposeValue();
+ if ( result == update_flags::result_removed ) {
+ --m_ItemCounter;
+ if ( func( pOld )) // calls pOld disposer inside
+ m_stat.onDisposeValue();
+ else
+ m_stat.onExtractValue();
}
return result;
// pParent and pNode must be locked
assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
- node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
+ node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
+ node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
if ( pNode != pParentLeft && pNode != pParentRight ) {
// node is no longer a child of parent
return false;
}
- assert( !pNode->is_unlinked());
- assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
+ assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
+ assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
- node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
if ( pLeft != nullptr && pRight != nullptr ) {
// splicing is no longer possible
return false;
pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
if ( pSplice )
- pSplice->pParent.store( pParent, memory_model::memory_order_release );
+ pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
// Mark the node as unlinked
pNode->version( node_type::unlinked, memory_model::memory_order_release );
+ mapped_type pVal = pNode->value( memory_model::memory_order_relaxed );
+ if ( pVal ) {
+ free_value( pVal );
+ m_stat.onDisposeValue();
+ pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
+ }
disp.dispose( pNode );
+ m_stat.onDisposeNode();
return true;
}
//@endcond
+
private: // rotations
//@cond
int estimate_node_condition( node_type * pNode )
{
- node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
- if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
+ if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
return unlink_required;
int h = pNode->height( memory_model::memory_order_relaxed );
return nullptr;
default:
pNode->height( h, memory_model::memory_order_relaxed );
- return pNode->m_pParent.load( memory_model::memory_order_relaxed );
+ return parent( pNode, memory_model::memory_order_relaxed );
}
}
void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
{
- while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
+ while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed )) {
int nCond = estimate_node_condition( pNode );
if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
return;
if ( nCond != unlink_required && nCond != rebalance_required )
pNode = fix_height( pNode );
else {
- node_type * pParent = pNode->m_pParent.load( memory_model::memry_order_relaxed );
+ node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
assert( pParent != nullptr );
{
node_scoped_lock lp( m_Monitor, *pParent );
if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
- && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
+ && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
{
node_scoped_lock ln( m_Monitor, *pNode );
pNode = rebalance_locked( pParent, pNode, disp );
node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
{
- // pParent and pNode shlould be locked.
+ // pParent and pNode should be locked.
// Returns a damaged node, or nullptr if no more rebalancing is necessary
- assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
- assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
- || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
+ assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
- node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
- if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
+ if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
if ( try_unlink_locked( pParent, pNode, disp ))
return fix_height_locked( pParent );
else {
node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
{
- assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
- assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
- || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
+ assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
// pNode->pLeft is too large, we will rotate-right.
if ( hL - hR <= 1 )
return pNode; // retry
- node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
+ node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
- node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
+ node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
if ( hLL > hLR ) {
if ( hLL > hLR )
return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
- node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
+ node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
int balance = hLL - hLRL;
- if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
+ if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
// nParent.child.left won't be damaged after a double rotation
return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
}
/// Rebalances a right-heavy \p pNode by rotating its right child up (diff hunk
/// resolved; interior lines — the child lock and the computation of \p hR — are
/// elided from this view).
/// Chooses a single left rotation when the right subtree is right-heavy, a
/// left-over-right double rotation when it is safely balanced, and otherwise
/// recurses into the right child. pParent and pNode must already be locked.
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
{
    assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
    assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
         || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );

    // pParent and pNode is locked yet
    {
        // NOTE(review): hR is defined in lines elided by the hunk — TODO confirm
        // it is pRight's height read under the child lock.
        if ( hL - hR >= -1 )
            return pNode; // retry

        node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
        int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
        node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
        int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;

        if ( hRR > hRL )
            return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
        // NOTE(review): the hunk elides the inner lock between these two tests;
        // the >= form looks like the under-lock recheck — verify against the full file.
        if ( hRR >= hRL )
            return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );

        node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
        int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
        int balance = hRR - hRLR;
        if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
            return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
    }
    return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
/// Single right rotation: pLeft becomes the parent of pNode (diff hunk resolved;
/// several interior lines — relinking of pLRight, pNode height update defining
/// \p hNode, and the balance checks — are elided from this view).
/// Returns the node that may still need repair, or the result of fixing the
/// parent's height.
node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
{
    // Bump pNode's version so concurrent unlocked readers retry (begin/end_change).
    version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
    node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );

    begin_change( pNode, nodeVersion );

    // Splice pLeft into pParent in place of pNode.
    if ( pParentLeft == pNode )
        pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
    else {
        assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
        pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
    }
    pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
    // NOTE(review): hNode is computed in lines elided by the hunk — presumably
    // pNode's new height after the rotation; confirm against the full file.
    pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );

    end_change( pNode, nodeVersion );
    m_stat.onRotateRight();

    // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
    // parent.child). pNode is the deepest. Perform as many fixes as we can

    // we've fixed balance and height damage for pNode, now handle
    // extra-routing node damage
    if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
        // we need to remove pNode and then repair
        return pNode;
    }
    // NOTE(review): this bare return belongs to a balance check elided by the
    // hunk; as written it makes the code below unreachable — resolve against
    // the full file.
    return pLeft;

    // pLeft might also have routing node damage (if pLeft.left was null)
    if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
        return pLeft;

    // try to fix the parent height while we've still got the lock
/// Single left rotation: pRight becomes the parent of pNode (diff hunk resolved;
/// a few interior lines — pNode's height update defining \p hNode and the
/// closing brace — are elided from this view).
/// After the links are fixed, returns whichever node still has height/balance
/// or routing-node damage, else repairs the parent's height.
node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
{
    // Bump pNode's version so concurrent unlocked readers retry (begin/end_change).
    version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
    node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );

    begin_change( pNode, nodeVersion );

    // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
    pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
    if ( pRLeft != nullptr )
        pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );

    // Splice pRight into pParent in place of pNode.
    if ( pParentLeft == pNode )
        pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
    else {
        assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
        pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
    }
    pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
    // NOTE(review): hNode is computed in lines elided by the hunk — TODO confirm.
    pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );

    end_change( pNode, nodeVersion );
    m_stat.onRotateLeft();

    // pNode may still be outside the allowed balance range — needs another rotation.
    int nodeBalance = hRL - hL;
    if ( nodeBalance < -1 || nodeBalance > 1 )
        return pNode;
    // pNode may have become an unnecessary routing node.
    if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
        return pNode;
    // Same two checks for pRight, the new subtree root.
    int rightBalance = hRR - hNode;
    if ( rightBalance < -1 || rightBalance > 1 )
        return pRight;
    if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
        return pRight;
    // try to fix the parent height while we've still got the lock
    return fix_height_locked( pParent );
node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
{
- typename node_type::value_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
- typename node_type::value_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
+ version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
- node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
- int hLRR = pLRR ? pLRR->hight( memory_model::memory_order_relaxed ) : 0;
+ node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
+ node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
+ node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
+ int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
begin_change( pNode, nodeVersion );
begin_change( pLeft, leftVersion );
if ( pPL == pNode )
pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
else {
- assert( Parent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
+ assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
}
pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
end_change( pNode, nodeVersion );
end_change( pLeft, leftVersion );
+ m_stat.onRotateRightOverLeft();
// caller should have performed only a single rotation if pLeft was going
// to end up damaged
assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
- assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
+ assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
// We have damaged pParent, pLR (now parent.child), and pNode (now
// parent.child.right). pNode is the deepest. Perform as many fixes as we
}
// pNode might also be damaged by being an unnecessary routing node
- if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
+ if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
// repair involves splicing out pNode and maybe more rotations
return pNode;
}
// we've already fixed the height at pLRight, do we need a rotation here?
- final int balanceLR = hLeft - hNode;
+ int balanceLR = hLeft - hNode;
if ( balanceLR < -1 || balanceLR > 1 )
- return pLR;
+ return pLRight;
// try to fix the parent height while we've still got the lock
return fix_height_locked( pParent );
version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
- node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
- node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
+ node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
+ node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
+ node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
begin_change( pNode, nodeVersion );
- begin_change( pRight, ghtVersion );
+ begin_change( pRight, rightVersion );
// fix up pNode links, careful about the order!
pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
end_change( pNode, nodeVersion );
end_change( pRight, rightVersion );
+ m_stat.onRotateLeftOverRight();
assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
int nodeBalance = hRLL - hL;
if ( nodeBalance < -1 || nodeBalance > 1 )
return pNode;
- if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
+ if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
return pNode;
int balRL = hRight - hNode;