#ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
#define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
-#include <memory> // unique_ptr
+#include <type_traits> // is_base_of
#include <cds/container/details/bronson_avltree_base.h>
#include <cds/urcu/details/check_deadlock.h>
+#include <cds/urcu/exempt_ptr.h>
namespace cds { namespace container {
/** @ingroup cds_nonintrusive_map
@ingroup cds_nonintrusive_tree
@headerfile cds/container/bronson_avltree_map_rcu.h
+ @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
/// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
+ /// Group of \p extract_xxx functions does not require external locking
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
+
+# ifdef CDS_DOXYGEN_INVOKED
/// Returned pointer to \p mapped_type of extracted node
- typedef std::unique_ptr< T, disposer > unique_ptr;
+ typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
+# else
+ typedef cds::urcu::exempt_ptr< gc,
+ typename std::remove_pointer<mapped_type>::type,
+ typename std::remove_pointer<mapped_type>::type,
+ disposer,
+ void
+ > exempt_ptr;
+# endif
typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
static void free_node( node_type * pNode )
{
// Free node without disposer
+ assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
+ assert( pNode->m_SyncMonitorInjection.check_free());
cxx_allocator().Delete( pNode );
}
return pNode->m_pParent.load( order );
}
- // RCU safe disposer for node
+ // RCU safe disposer
class rcu_disposer
{
- node_type * m_pRetiredList; ///< head of retired list
+ node_type * m_pRetiredList; ///< head of retired node list
+ mapped_type m_pRetiredValue; ///< value retired
public:
rcu_disposer()
: m_pRetiredList( nullptr )
+ , m_pRetiredValue( nullptr )
{}
~rcu_disposer()
pNode->m_pNextRemoved = m_pRetiredList;
m_pRetiredList = pNode;
}
-
+
+ void dispose_value( mapped_type pVal )
+ {
+ assert( m_pRetiredValue == nullptr );
+ m_pRetiredValue = pVal;
+ }
+
private:
struct internal_disposer
{
void clean()
{
assert( !gc::is_locked() );
+
+ // TODO: use RCU::batch_retire
+ // Dispose nodes
for ( node_type * p = m_pRetiredList; p; ) {
node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
// Value already disposed
gc::template retire_ptr<internal_disposer>( p );
p = pNext;
}
+
+ // Dispose value
+ if ( m_pRetiredValue )
+ gc::template retire_ptr<disposer>( m_pRetiredValue );
}
};
[pVal]( node_type * pNode ) -> mapped_type
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
+ CDS_UNUSED( pNode );
return pVal;
},
update_flags::allow_insert
If \p bInsert is \p false, only updating of existing node is possible.
If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
- then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
+ then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
constructible from type \p K.
- Otherwise, the value is changed to \p pVal.
+ Otherwise, the value for \p key will be changed to \p pVal.
RCU \p synchronize() method can be called. RCU should not be locked.
Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
\p second is \p true if new node has been added or \p false if the node with \p key
- already is in the tree.
+ already exists.
*/
- template <typename K, typename Func>
+ template <typename K>
std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
{
int result = do_update( key, key_comparator(),
return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
}
+ //@cond
+ template <typename K>
+ std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
+ {
+ return update( key, pVal, true );
+ }
+
+ //@endcond
+
/// Delete \p key from the map
/**
RCU \p synchronize() method can be called. RCU should not be locked.
return do_remove(
key,
key_comparator(),
- []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
+ []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
);
}
return do_remove(
key,
cds::opt::details::make_comparator_from_less<Less>(),
- []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
+ []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
);
}
The functor \p Func interface:
\code
struct extractor {
- void operator()(mapped_type& item) { ... }
+ void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
};
\endcode
return do_remove(
key,
key_comparator(),
- [&f]( mapped_type pVal ) -> bool {
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( *pVal );
- free_value(pVal);
+ f( key, *pVal );
+ disp.dispose_value(pVal);
return true;
}
);
return do_remove(
key,
cds::opt::details::make_comparator_from_less<Less>(),
- [&f]( mapped_type pVal ) -> bool {
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( *pVal );
- free_value(pVal);
+ f( key, *pVal );
+ disp.dispose_value(pVal);
return true;
}
);
}
- /// Extracts an item with minimal key from the map
+ /// Extracts a value with minimal key from the map
/**
- Returns \p std::unique_ptr to the leftmost item.
- If the set is empty, returns empty \p std::unique_ptr.
+ Returns \p exempt_ptr to the leftmost item.
+ If the tree is empty, returns empty \p exempt_ptr.
+
+ Note that the function returns only the value for minimal key.
+ To retrieve its key use \p extract_min( Func ) member function.
@note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
It means that the function gets leftmost leaf of the tree and tries to unlink it.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
- its \p reset(nullptr) member function is called.
+ its \p release() member function is called.
*/
- unique_ptr extract_min()
+ exempt_ptr extract_min()
{
- unique_ptr pExtracted;
+ return exempt_ptr(do_extract_min( []( key_type const& ) {}));
+ }
- do_extract_minmax(
- left_child,
- [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
- );
- return pExtracted;
+ /// Extracts minimal key and corresponding value
+ /**
+ Returns \p exempt_ptr to the leftmost item.
+ If the tree is empty, returns empty \p exempt_ptr.
+
+ \p Func functor is used to store minimal key.
+ \p Func has the following signature:
+ \code
+ struct functor {
+ void operator()( key_type const& key );
+ };
+ \endcode
+ If the tree is empty, \p f is not called.
+ Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
+ as \p exempt_ptr.
+
+ @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
+ It means that the function gets leftmost leaf of the tree and tries to unlink it.
+ During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
+ So, the function returns the item with minimum key at the moment of tree traversing.
+
+ RCU \p synchronize method can be called. RCU should NOT be locked.
+ The function does not free the item.
+ The deallocator will be implicitly invoked when the returned object is destroyed or when
+ its \p release() member function is called.
+ */
+ template <typename Func>
+ exempt_ptr extract_min( Func f )
+ {
+ return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
+ }
+
+ /// Extracts minimal key and corresponding value
+ /**
+ This function is a shortcut for the following call:
+ \code
+ key_type key;
+ exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
+ \endcode
+ \p key_type should be copy-assignable. The copy of minimal key
+ is returned in \p min_key argument.
+ */
+ typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
+ extract_min_key( key_type& min_key )
+ {
+ return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
}
- /// Extracts an item with maximal key from the map
+ /// Extracts a value with maximal key from the tree
/**
- Returns \p std::unique_ptr pointer to the rightmost item.
- If the set is empty, returns empty \p std::unique_ptr.
+ Returns \p exempt_ptr pointer to the rightmost item.
+ If the set is empty, returns empty \p exempt_ptr.
+
+ Note that the function returns only the value for maximal key.
+ To retrieve its key use \p extract_max( Func ) member function.
@note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
- its \p reset(nullptr) is called.
- @note Before reusing \p result object you should call its \p release() method.
+ its \p release() is called.
*/
- unique_ptr extract_max()
+ exempt_ptr extract_max()
{
- unique_ptr pExtracted;
+ return exempt_ptr(do_extract_max( []( key_type const& ) {}));
+ }
- do_extract_minmax(
- right_child,
- [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
- );
- return pExtracted;
+ /// Extracts the maximal key and corresponding value
+ /**
+ Returns \p exempt_ptr pointer to the rightmost item.
+ If the set is empty, returns empty \p exempt_ptr.
+
+ \p Func functor is used to store maximal key.
+ \p Func has the following signature:
+ \code
+ struct functor {
+ void operator()( key_type const& key );
+ };
+ \endcode
+ If the tree is empty, \p f is not called.
+ Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
+ as \p exempt_ptr.
+
+ @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
+ It means that the function gets rightmost leaf of the tree and tries to unlink it.
+ During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
+ So, the function returns the item with maximum key at the moment of tree traversing.
+
+ RCU \p synchronize method can be called. RCU should NOT be locked.
+ The function does not free the item.
+ The deallocator will be implicitly invoked when the returned object is destroyed or when
+ its \p release() is called.
+ */
+ template <typename Func>
+ exempt_ptr extract_max( Func f )
+ {
+ return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
+ }
+
+ /// Extracts the maximal key and corresponding value
+ /**
+ This function is a shortcut for the following call:
+ \code
+ key_type key;
+ exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
+ \endcode
+ \p key_type should be copy-assignable. The copy of maximal key
+ is returned in \p max_key argument.
+ */
+ typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
+ extract_max_key( key_type& max_key )
+ {
+ return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
}
/// Extracts an item from the map
/**
The function searches an item with key equal to \p key in the tree,
- unlinks it, and returns \p std::unique_ptr pointer to a value found.
- If \p key is not found the function returns an empty \p std::unique_ptr.
+ unlinks it, and returns \p exempt_ptr pointer to a value found.
+ If \p key is not found the function returns an empty \p exempt_ptr.
RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not destroy the value found.
The disposer will be implicitly invoked when the returned object is destroyed or when
- its \p reset(nullptr) member function is called.
+ its \p release() member function is called.
*/
template <typename Q>
- unique_ptr extract( Q const& key )
+ exempt_ptr extract( Q const& key )
{
- unique_ptr pExtracted;
-
- do_remove(
- key,
- key_comparator(),
- [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
- );
- return pExtracted;
+ return exempt_ptr(do_extract( key ));
}
+
/// Extracts an item from the map using \p pred for searching
/**
The function is an analog of \p extract(Q const&)
\p pred must imply the same element order as the comparator used for building the tree.
*/
template <typename Q, typename Less>
- unique_ptr extract_with( Q const& key, Less pred )
+ exempt_ptr extract_with( Q const& key, Less pred )
{
- CDS_UNUSED( pred );
- unique_ptr pExtracted;
-
- do_remove(
- key,
- cds::opt::details::make_comparator_from_less<Less>(),
- [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
- );
- return pExtracted;
+ return exempt_ptr(do_extract_with( key, pred ));
}
/// Find the key \p key
*/
void unsafe_clear()
{
+ clear(); // temp solution
//TODO
}
return m_stat;
}
+ /// Returns reference to \p sync_monitor object
+ sync_monitor& monitor()
+ {
+ return m_Monitor;
+ }
+ //@cond
+ sync_monitor const& monitor() const
+ {
+ return m_Monitor;
+ }
+ //@endcond
+
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
*/
bool check_consistency() const
{
- //TODO
+ return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
+ }
+
+ /// Checks internal consistency (not atomic, not thread-safe)
+ /**
+ The debugging function to check internal consistency of the tree.
+ The functor \p Func is called if a violation of internal tree structure
+ is found:
+ \code
+ struct functor {
+ void operator()( size_t nLevel, size_t hLeft, size_t hRight );
+ };
+ \endcode
+ where
+ - \p nLevel - the level where the violation is found
+ - \p hLeft - the height of left subtree
+ - \p hRight - the height of right subtree
+
+ The functor is called for each violation found.
+ */
+ template <typename Func>
+ bool check_consistency( Func f ) const
+ {
+ node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
+ if ( pChild ) {
+ size_t nErrors = 0;
+ do_check_consistency( pChild, 1, f, nErrors );
+ return nErrors == 0;
+ }
return true;
}
protected:
//@cond
+ // Recursive consistency checker: verifies BST key ordering and the AVL
+ // balance invariant for the subtree rooted at \p pNode.
+ // Each key-ordering violation increments \p nErrors; each balance violation
+ // (sibling subtree heights differing by more than 1) increments \p nErrors
+ // and invokes f( nLevel, hLeft, hRight ).
+ // Returns the height of the subtree (0 for an empty subtree).
+ template <typename Func>
+ size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
+ {
+ if ( pNode ) {
+ key_comparator cmp;
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
+ if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
+ ++nErrors;
+ if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
+ ++nErrors;
+
+ size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
+ size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
+
+ if ( hLeft >= hRight ) {
+ if ( hLeft - hRight > 1 ) {
+ f( nLevel, hLeft, hRight );
+ ++nErrors;
+ }
+ // Height of a node is 1 + height of its taller child; without the +1
+ // every subtree reported height 0 and the balance check never fired.
+ return hLeft + 1;
+ }
+ else {
+ if ( hRight - hLeft > 1 ) {
+ f( nLevel, hLeft, hRight );
+ ++nErrors;
+ }
+ return hRight + 1;
+ }
+ }
+ return 0;
+ }
+
template <typename Q, typename Compare, typename Func>
bool do_find( Q& key, Compare cmp, Func f ) const
{
}
}
+ // Unlinks the leftmost (nearly-minimal key) node and returns its value
+ // pointer, or nullptr if the tree is empty. \p f is called with the key of
+ // the extracted node. The lambda returns false, so the value is NOT handed
+ // to the disposer here — ownership passes to the caller (see extract_min(),
+ // which wraps the result in exempt_ptr).
+ template <typename Func>
+ mapped_type do_extract_min( Func f )
+ {
+ mapped_type pExtracted = nullptr;
+ do_extract_minmax(
+ left_child,
+ [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
+ );
+ return pExtracted;
+ }
+
+ // Unlinks the rightmost (nearly-maximal key) node and returns its value
+ // pointer, or nullptr if the tree is empty. \p f is called with the key of
+ // the extracted node. The lambda returns false, so the value is NOT handed
+ // to the disposer here — ownership passes to the caller (see extract_max(),
+ // which wraps the result in exempt_ptr).
+ template <typename Func>
+ mapped_type do_extract_max( Func f )
+ {
+ mapped_type pExtracted = nullptr;
+ do_extract_minmax(
+ right_child,
+ [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
+ );
+ return pExtracted;
+ }
+
template <typename Func>
void do_extract_minmax( int nDir, Func func )
{
{
rcu_lock l;
- int result;
+ int result = update_flags::failed;
do {
// get right child of root
node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
}
}
+ // Unlinks the node whose key equals \p key (per key_comparator) and returns
+ // its value pointer, or nullptr if the key is not found. The lambda returns
+ // false, so the value is not passed to the disposer — ownership goes to the
+ // caller (extract() wraps the result in exempt_ptr).
+ template <typename Q>
+ mapped_type do_extract( Q const& key )
+ {
+ mapped_type pExtracted = nullptr;
+ do_remove(
+ key,
+ key_comparator(),
+ [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
+ );
+ return pExtracted;
+ }
+
+ // Same as do_extract() but compares keys with a comparator built from the
+ // \p Less predicate type. \p pred itself is never invoked — only its type
+ // is used (hence CDS_UNUSED); \p pred must induce the same order as the
+ // tree's key_comparator.
+ template <typename Q, typename Less>
+ mapped_type do_extract_with( Q const& key, Less pred )
+ {
+ CDS_UNUSED( pred );
+ mapped_type pExtracted = nullptr;
+ do_remove(
+ key,
+ cds::opt::details::make_comparator_from_less<Less>(),
+ [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
+ );
+ return pExtracted;
+ }
+
//@endcond
private:
}
}
else if ( nChildVersion != node_type::unlinked ) {
-
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
m_stat.onFindRetry();
return find_result::retry;
if ( found != find_result::retry )
return found;
}
+
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+ m_stat.onFindRetry();
+ return find_result::retry;
+ }
}
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
else {
// the tree is empty
{
node_scoped_lock l( m_Monitor, *m_pRoot );
if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
- result = result == update_flags::retry;
+ result = update_flags::retry;
continue;
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
else
return false;
int nCmp = cmp( key, pNode->m_key );
if ( nCmp == 0 ) {
if ( nFlags & update_flags::allow_update ) {
- return try_update_node( funcUpdate, pNode );
+ return try_update_node( funcUpdate, pNode, disp );
}
return update_flags::failed;
}
result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
}
}
+
+ if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onUpdateRetry();
+ return update_flags::retry;
+ }
} while ( result == update_flags::retry );
return result;
}
result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
}
}
+
+ if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onRemoveRetry();
+ return update_flags::retry;
+ }
} while ( result == update_flags::retry );
return result;
}
result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
}
}
+
+ if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
+ m_stat.onRemoveRetry();
+ return update_flags::retry;
+ }
} while ( result == update_flags::retry );
return result;
}
|| child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
{
if ( c_bRelaxedInsert ) {
+ mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
+ pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
+ free_value( pVal );
free_node( pNew );
m_stat.onRelaxedInsertFailed();
}
++m_ItemCounter;
m_stat.onInsertSuccess();
- fix_height_and_rebalance( pDamaged, disp );
+ if ( pDamaged ) {
+ fix_height_and_rebalance( pDamaged, disp );
+ m_stat.onInsertRebalanceRequired();
+ }
return update_flags::result_inserted;
}
template <typename Func>
- int try_update_node( Func funcUpdate, node_type * pNode )
+ int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
{
mapped_type pOld;
assert( pNode != nullptr );
}
if ( pOld ) {
- free_value(pOld);
+ disp.dispose_value(pOld);
m_stat.onDisposeValue();
}
}
--m_ItemCounter;
- if ( func( pOld ) ) // calls pOld disposer inside
+ if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
m_stat.onDisposeValue();
else
m_stat.onExtractValue();
- fix_height_and_rebalance( pDamaged, disp );
+ if ( pDamaged ) {
+ fix_height_and_rebalance( pDamaged, disp );
+ m_stat.onRemoveRebalanceRequired();
+ }
return update_flags::result_removed;
}
else {
if ( result == update_flags::result_removed ) {
--m_ItemCounter;
- if ( func( pOld )) // calls pOld disposer inside
+ if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
m_stat.onDisposeValue();
else
m_stat.onExtractValue();
// pParent and pNode should be locked.
// Returns a damaged node, or nullptr if no more rebalancing is necessary
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
- || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
}
}
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+
int h = pNode->height( memory_model::memory_order_relaxed );
int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
{
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
{
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet