/** @ingroup cds_nonintrusive_map
@ingroup cds_nonintrusive_tree
@headerfile cds/container/bronson_avltree_map_rcu.h
+ @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al's AVL-tree"
for the "key -> value pointer" map. This specialization stores pointers to user-allocated values instead of the copy
// Reclaims a node's memory directly via the allocator, bypassing the disposer.
static void free_node( node_type * pNode )
{
// Free node without disposer.
// The node must no longer hold a mapped value, and its embedded sync-monitor
// data must be safe to release — both enforced by the assertions added below.
+ assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
+ assert( pNode->m_SyncMonitorInjection.check_free());
cxx_allocator().Delete( pNode );
}
[pVal]( node_type * pNode ) -> mapped_type
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
+ CDS_UNUSED( pNode );
return pVal;
},
update_flags::allow_insert
The functor \p Func interface:
\code
struct extractor {
- void operator()(mapped_type& item) { ... }
+ void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
};
\endcode
return do_remove(
key,
key_comparator(),
- [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( *pVal );
+ f( key, *pVal );
disp.dispose_value(pVal);
return true;
}
return do_remove(
key,
cds::opt::details::make_comparator_from_less<Less>(),
- [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( *pVal );
+ f( key, *pVal );
disp.dispose_value(pVal);
return true;
}
return m_stat;
}
+ /// Returns a reference to the tree's \p sync_monitor object (used to lock nodes via \p node_scoped_lock)
+ sync_monitor& monitor()
+ {
+ return m_Monitor;
+ }
+ //@cond
+ // Const overload; hidden from generated documentation by @cond/@endcond.
+ sync_monitor const& monitor() const
+ {
+ return m_Monitor;
+ }
+ //@endcond
+
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
{
if ( pNode ) {
- size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
- size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
+ key_comparator cmp;
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
+ if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
+ ++nErrors;
+ if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
+ ++nErrors;
+
+ size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
+ size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
if ( hLeft >= hRight ) {
if ( hLeft - hRight > 1 ) {
}
}
else if ( nChildVersion != node_type::unlinked ) {
-
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
m_stat.onFindRetry();
return find_result::retry;
if ( found != find_result::retry )
return found;
}
+
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+ m_stat.onFindRetry();
+ return find_result::retry;
+ }
}
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
else {
// the tree is empty
{
node_scoped_lock l( m_Monitor, *m_pRoot );
if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
- result = result == update_flags::retry;
+ result = update_flags::retry;
continue;
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
else
return false;
result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
}
}
+
+ if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onUpdateRetry();
+ return update_flags::retry;
+ }
} while ( result == update_flags::retry );
return result;
}
result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
}
}
+
+ if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onRemoveRetry();
+ return update_flags::retry;
+ }
} while ( result == update_flags::retry );
return result;
}
result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
}
}
+
+ if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onRemoveRetry();
+ return update_flags::retry;
+ }
} while ( result == update_flags::retry );
return result;
}
|| child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
{
if ( c_bRelaxedInsert ) {
+ mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
+ pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
+ free_value( pVal );
free_node( pNew );
m_stat.onRelaxedInsertFailed();
}
++m_ItemCounter;
m_stat.onInsertSuccess();
- fix_height_and_rebalance( pDamaged, disp );
+ if ( pDamaged ) {
+ fix_height_and_rebalance( pDamaged, disp );
+ m_stat.onInsertRebalanceRequired();
+ }
return update_flags::result_inserted;
}
else
m_stat.onExtractValue();
- fix_height_and_rebalance( pDamaged, disp );
+ if ( pDamaged ) {
+ fix_height_and_rebalance( pDamaged, disp );
+ m_stat.onRemoveRebalanceRequired();
+ }
return update_flags::result_removed;
}
else {
// pParent and pNode should be locked.
// Returns a damaged node, or nullptr if no more rebalancing is necessary
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
- || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
}
}
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+
int h = pNode->height( memory_model::memory_order_relaxed );
int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
{
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are still locked at this point
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
{
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are still locked at this point