3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <memory> // unique_ptr
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/details/binary_functor_wrapper.h>
12 namespace cds { namespace container {
14 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
15 /** @ingroup cds_nonintrusive_map
16 @ingroup cds_nonintrusive_tree
17 @headerfile cds/container/bronson_avltree_map_rcu.h
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Returned pointer to \p mapped_type of extracted node
72 typedef std::unique_ptr< T, disposer > unique_ptr;
74 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
78 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
79 typedef typename node_type::version_type version_type;
81 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
82 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
84 enum class find_result
101 result_inserted = allow_insert,
102 result_updated = allow_update,
109 nothing_required = -3,
110 rebalance_required = -2,
114 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
119 template <typename K>
120 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
122 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
125 static void free_node( node_type * pNode )
127 // Free node without disposer
128 cxx_allocator().Delete( pNode );
131 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
133 return pNode->child( nDir ).load( order );
136 static node_type * parent( node_type * pNode, atomics::memory_order order )
138 return pNode->m_pParent.load( order );
144 node_type * m_pRetiredList; ///< head of retired list
148 : m_pRetiredList( nullptr )
156 void dispose( node_type * pNode )
158 mapped_type pVal = pNode->value( memory_model::memory_order_relaxed );
160 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
164 pNode->m_pNextRemoved = m_pRetiredList;
165 m_pRetiredList = pNode;
169 struct internal_disposer
171 void operator()( node_type * p ) const
179 assert( !gc::is_locked() );
181 for ( node_type * p = m_pRetiredList; p; ) {
182 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
183 // Value already disposed
184 gc::template retire_ptr<internal_disposer>( p );
194 typename node_type::base_class m_Root;
196 item_counter m_ItemCounter;
197 mutable sync_monitor m_Monitor;
202 /// Creates empty map
204 : m_pRoot( static_cast<node_type *>( &m_Root ))
215 The \p key_type should be constructible from a value of type \p K.
217 RCU \p synchronize() can be called. RCU should not be locked.
219 Returns \p true if inserting successful, \p false otherwise.
221 template <typename K>
222 bool insert( K const& key, mapped_type pVal )
224 return do_update(key, key_comparator(),
225 [pVal]( node_type * pNode ) -> mapped_type
227 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
230 update_flags::allow_insert
231 ) == update_flags::result_inserted;
234 /// Updates the value for \p key
236 The operation performs inserting or updating the value for \p key with lock-free manner.
237 If \p bInsert is \p false, only updating of existing node is possible.
239 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
240 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
241 constructible from type \p K.
242 Otherwise, the value is changed to \p pVal.
244 RCU \p synchronize() method can be called. RCU should not be locked.
246 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
247 \p second is \p true if new node has been added or \p false if the node with \p key
248 already is in the tree.
250 template <typename K, typename Func>
251 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
253 int result = do_update( key, key_comparator(),
254 [pVal]( node_type * ) -> mapped_type
258 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
260 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
263 /// Delete \p key from the map
265 RCU \p synchronize() method can be called. RCU should not be locked.
267 Return \p true if \p key is found and deleted, \p false otherwise
269 template <typename K>
270 bool erase( K const& key )
275 []( mapped_type pVal ) { disposer()(pVal); }
279 /// Deletes the item from the map using \p pred predicate for searching
281 The function is an analog of \p erase(K const&)
282 but \p pred is used for key comparing.
283 \p Less functor has the interface like \p std::less.
284 \p Less must imply the same element order as the comparator used for building the map.
286 template <typename K, typename Less>
287 bool erase_with( K const& key, Less pred )
292 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
293 []( mapped_type pVal ) { disposer()(pVal); }
297 /// Delete \p key from the map
299 The function searches an item with key \p key, calls \p f functor
300 and deletes the item. If \p key is not found, the functor is not called.
302 The functor \p Func interface:
305 void operator()(mapped_type& item) { ... }
309 RCU \p synchronize method can be called. RCU should not be locked.
311 Return \p true if key is found and deleted, \p false otherwise
313 template <typename K, typename Func>
314 bool erase( K const& key, Func f )
319 [&f]( mapped_type pVal ) {
327 /// Deletes the item from the map using \p pred predicate for searching
329 The function is an analog of \p erase(K const&, Func)
330 but \p pred is used for key comparing.
331 \p Less functor has the interface like \p std::less.
332 \p Less must imply the same element order as the comparator used for building the map.
334 template <typename K, typename Less, typename Func>
335 bool erase_with( K const& key, Less pred, Func f )
340 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
341 [&f]( mapped_type pVal ) {
349 /// Extracts an item with minimal key from the map
351 Returns \p std::unique_ptr to the leftmost item.
352 If the set is empty, returns empty \p std::unique_ptr.
354 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
355 It means that the function gets leftmost leaf of the tree and tries to unlink it.
356 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
357 So, the function returns the item with minimum key at the moment of tree traversing.
359 RCU \p synchronize method can be called. RCU should NOT be locked.
360 The function does not free the item.
361 The deallocator will be implicitly invoked when the returned object is destroyed or when
362 its \p reset(nullptr) member function is called.
364 unique_ptr extract_min()
370 /// Extracts an item with maximal key from the map
372 Returns \p std::unique_ptr pointer to the rightmost item.
373 If the set is empty, returns empty \p std::unique_ptr.
375 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
376 It means that the function gets rightmost leaf of the tree and tries to unlink it.
377 During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
378 So, the function returns the item with maximum key at the moment of tree traversing.
380 RCU \p synchronize method can be called. RCU should NOT be locked.
381 The function does not free the item.
382 The deallocator will be implicitly invoked when the returned object is destroyed or when
383 its \p reset(nullptr) is called.
384 @note Before reusing \p result object you should call its \p release() method.
386 unique_ptr extract_max()
392 /// Extracts an item from the map
394 The function searches an item with key equal to \p key in the tree,
395 unlinks it, and returns \p std::unique_ptr pointer to a value found.
396 If \p key is not found the function returns an empty \p std::unique_ptr.
398 RCU \p synchronize method can be called. RCU should NOT be locked.
399 The function does not destroy the value found.
400 The disposer will be implicitly invoked when the returned object is destroyed or when
401 its \p reset(nullptr) member function is called.
403 template <typename Q>
404 unique_ptr extract( Q const& key )
406 unique_ptr pExtracted;
411 [&pExtracted]( mapped_type pVal ) { pExtracted.reset( pVal ); }
416 /// Extracts an item from the map using \p pred for searching
418 The function is an analog of \p extract(Q const&)
419 but \p pred is used for key compare.
420 \p Less has the interface like \p std::less.
421 \p pred must imply the same element order as the comparator used for building the tree.
423 template <typename Q, typename Less>
424 unique_ptr extract_with( Q const& key, Less pred )
427 unique_ptr pExtracted;
431 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
432 [&pExtracted]( mapped_type pVal ) { pExtracted.reset( pVal ); }
437 /// Find the key \p key
439 The function searches the item with key equal to \p key and calls the functor \p f for item found.
440 The interface of \p Func functor is:
443 void operator()( key_type const& key, mapped_type& item );
446 where \p item is the item found.
447 The functor is called under node-level lock.
449 The function applies RCU lock internally.
451 The function returns \p true if \p key is found, \p false otherwise.
453 template <typename K, typename Func>
454 bool find( K const& key, Func f )
456 return do_find( key, key_comparator(),
457 [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
458 assert( pNode != nullptr );
459 node_scoped_lock l( monitor, *pNode );
460 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
462 f( pNode->m_key, *pVal );
470 /// Finds the key \p val using \p pred predicate for searching
472 The function is an analog of \p find(K const&, Func)
473 but \p pred is used for key comparing.
474 \p Less functor has the interface like \p std::less.
475 \p Less must imply the same element order as the comparator used for building the map.
477 template <typename K, typename Less, typename Func>
478 bool find_with( K const& key, Less pred, Func f )
481 return do_find( key, opt::details::make_comparator_from_less<Less>(),
482 [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
483 assert( pNode != nullptr );
484 node_scoped_lock l( monitor, *pNode );
485 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
487 f( pNode->m_key, *pVal );
495 /// Find the key \p key
497 The function searches the item with key equal to \p key
498 and returns \p true if it is found, and \p false otherwise.
500 The function applies RCU lock internally.
502 template <typename K>
503 bool find( K const& key )
505 return do_find( key, key_comparator(), []( sync_monitor&, node_type * ) -> bool { return true; });
508 /// Finds the key \p val using \p pred predicate for searching
510 The function is an analog of \p find(K const&)
511 but \p pred is used for key comparing.
512 \p Less functor has the interface like \p std::less.
513 \p Less must imply the same element order as the comparator used for building the map.
515 template <typename K, typename Less>
516 bool find_with( K const& key, Less pred )
519 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( sync_monitor&, node_type * ) -> bool { return true; } );
522 /// Clears the tree (thread safe, not atomic)
524 The function unlink all items from the tree.
525 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
529 assert( set.empty() );
531 the assertion could be raised.
533 For each node the \ref disposer will be called after unlinking.
535 RCU \p synchronize method can be called. RCU should not be locked.
539 while ( extract_min() );
542 /// Clears the tree (not thread safe)
544 This function is not thread safe and may be called only when no other thread deals with the tree.
545 The function is used in the tree destructor.
552 /// Checks if the map is empty
555 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
558 /// Returns item count in the map
560 Only leaf nodes containing user data are counted.
562 The value returned depends on item counter type provided by \p Traits template parameter.
563 If it is \p atomicity::empty_item_counter this function always returns 0.
565 The function is not suitable for checking the tree emptiness, use \p empty()
566 member function for this purpose.
570 return m_ItemCounter;
573 /// Returns const reference to internal statistics
574 stat const& statistics() const
579 /// Checks internal consistency (not atomic, not thread-safe)
581 The debugging function to check internal consistency of the tree.
583 bool check_consistency() const
591 template <typename Q, typename Compare, typename Func>
592 bool do_find( Q& key, Compare cmp, Func f ) const
597 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
599 assert( result != find_result::retry );
600 return result == find_result::found;
603 template <typename K, typename Compare, typename Func>
604 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
606 check_deadlock_policy::check();
608 rcu_disposer removed_list;
611 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
615 template <typename K, typename Compare, typename Func>
616 bool do_remove( K const& key, Compare cmp, Func func )
618 check_deadlock_policy::check();
620 rcu_disposer removed_list;
623 return try_remove_root( key, cmp, func, removed_list );
631 template <typename Q, typename Compare, typename Func>
632 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
634 assert( gc::is_locked() );
638 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
640 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
641 m_stat.onFindRetry();
642 return find_result::retry;
645 m_stat.onFindFailed();
646 return find_result::not_found;
649 int nCmp = cmp( key, pChild->m_key );
651 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
653 node_scoped_lock l( m_Monitor, *pChild );
654 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
655 if ( f( m_Monitor, pChild ) ) {
656 m_stat.onFindSuccess();
657 return find_result::found;
662 m_stat.onFindFailed();
663 return find_result::not_found;
666 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
667 if ( nChildVersion & node_type::shrinking ) {
668 m_stat.onFindWaitShrinking();
669 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
671 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
672 m_stat.onFindRetry();
673 return find_result::retry;
676 else if ( nChildVersion != node_type::unlinked ) {
678 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
679 m_stat.onFindRetry();
680 return find_result::retry;
683 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
684 if ( found != find_result::retry )
690 template <typename K, typename Compare, typename Func>
691 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
693 assert( gc::is_locked() );
697 // get right child of root
698 node_type * pChild = child( m_pRoot, 1, memory_model::memory_order_acquire );
700 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
701 if ( nChildVersion & node_type::shrinking ) {
702 m_stat.onUpdateRootWaitShrinking();
703 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
704 result = update_flags::retry;
706 else if ( pChild == child( m_pRoot, 1, memory_model::memory_order_acquire )) {
707 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
712 if ( nFlags & update_flags::allow_insert ) {
713 // insert into tree as right child of the root
715 node_scoped_lock l( m_Monitor, *m_pRoot );
717 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
718 mapped_type pVal = funcUpdate( pNew );
719 assert( pVal != nullptr );
720 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
722 m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
723 m_pRoot->height( 2, memory_model::memory_order_relaxed );
727 m_stat.onInsertSuccess();
728 return update_flags::result_inserted;
731 return update_flags::failed;
733 } while ( result == update_flags::retry );
737 template <typename K, typename Compare, typename Func>
738 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
740 assert( gc::is_locked() );
744 // get right child of root
745 node_type * pChild = child( m_pRoot, 1, memory_model::memory_order_acquire );
747 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
748 if ( nChildVersion & node_type::shrinking ) {
749 m_stat.onUpdateRootWaitShrinking();
750 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
751 result = update_flags::retry;
753 else if ( pChild == child( m_pRoot, 1, memory_model::memory_order_acquire )) {
754 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
759 } while ( result == update_flags::retry );
761 return result == update_flags::result_removed;
764 template <typename K, typename Compare, typename Func>
765 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
767 assert( gc::is_locked() );
768 assert( nVersion != node_type::unlinked );
769 CDS_UNUSED( pParent );
771 int nCmp = cmp( key, pNode->m_key );
773 if ( nFlags & update_flags::allow_update ) {
774 return try_update_node( funcUpdate, pNode );
776 return update_flags::failed;
781 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
782 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
783 return update_flags::retry;
785 if ( pChild == nullptr ) {
787 if ( nFlags & update_flags::allow_insert )
788 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
790 result = update_flags::failed;
794 result = update_flags::retry;
795 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
796 if ( nChildVersion & node_type::shrinking ) {
797 m_stat.onUpdateWaitShrinking();
798 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
801 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
802 // this second read is important, because it is protected by nChildVersion
804 // validate the read that our caller took to get to node
805 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
806 m_stat.onUpdateRetry();
807 return update_flags::retry;
810 // At this point we know that the traversal our parent took to get to node is still valid.
811 // The recursive implementation will validate the traversal from node to
812 // child, so just prior to the node nVersion validation both traversals were definitely okay.
813 // This means that we are no longer vulnerable to node shrinks, and we don't need
814 // to validate node version any more.
815 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
818 } while ( result == update_flags::retry );
822 template <typename K, typename Compare, typename Func>
823 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
825 assert( gc::is_locked() );
826 assert( nVersion != node_type::unlinked );
828 int nCmp = cmp( key, pNode->m_key );
830 return try_remove_node( pParent, pNode, nVersion, func, disp );
834 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
835 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
836 return update_flags::retry;
838 if ( pChild == nullptr ) {
839 return update_flags::failed;
843 result = update_flags::retry;
844 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
845 if ( nChildVersion & node_type::shrinking ) {
846 m_stat.onUpdateWaitShrinking();
847 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
850 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
851 // this second read is important, because it is protected by nChildVersion
853 // validate the read that our caller took to get to node
854 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
855 m_stat.onUpdateRetry();
856 return update_flags::retry;
859 // At this point we know that the traversal our parent took to get to node is still valid.
860 // The recursive implementation will validate the traversal from node to
861 // child, so just prior to the node nVersion validation both traversals were definitely okay.
862 // This means that we are no longer vulnerable to node shrinks, and we don't need
863 // to validate node version any more.
864 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
867 } while ( result == update_flags::retry );
871 template <typename K, typename Func>
872 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
876 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
877 mapped_type pVal = funcUpdate( pNew );
878 assert( pVal != nullptr );
879 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
882 if ( c_bRelaxedInsert ) {
883 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
884 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
886 m_stat.onInsertRetry();
887 return update_flags::retry;
890 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
893 node_type * pDamaged;
895 assert( pNode != nullptr );
896 node_scoped_lock l( m_Monitor, *pNode );
898 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
899 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
901 if ( c_bRelaxedInsert ) {
903 m_stat.onRelaxedInsertFailed();
906 m_stat.onInsertRetry();
907 return update_flags::retry;
910 if ( !c_bRelaxedInsert )
911 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
913 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
914 pDamaged = fix_height_locked( pNode );
916 m_stat.onInsertSuccess();
918 fix_height_and_rebalance( pDamaged, disp );
920 return update_flags::result_inserted;
923 template <typename Func>
924 int try_update_node( Func funcUpdate, node_type * pNode )
928 assert( pNode != nullptr );
929 node_scoped_lock l( m_Monitor, *pNode );
931 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
932 m_stat.onUpdateUnlinked();
933 return update_flags::retry;
936 pOld = pNode->value( memory_model::memory_order_relaxed );
937 mapped_type pVal = funcUpdate( pNode );
938 assert( pVal != nullptr );
939 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
944 m_stat.onDisposeNode();
947 m_stat.onUpdateSuccess();
948 return update_flags::result_updated;
951 template <typename Func>
952 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
954 assert( pParent != nullptr );
955 assert( pNode != nullptr );
957 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
958 return update_flags::failed;
960 if ( pNode->child( -1 ).load( atomics::memory_order_relaxed ) == nullptr
961 || pNode->child( 1 ).load( atomics::memory_order_relaxed ) == nullptr )
963 node_type * pDamaged;
966 node_scoped_lock lp( m_Monitor, *pParent );
967 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
968 return update_flags::retry;
971 node_scoped_lock ln( m_Monitor, *pNode );
972 pOld = pNode->value( memory_model::memory_order_relaxed );
973 if ( pNode->version( memory_model::memory_order_relaxed ) == nVersion
975 && try_unlink_locked( pParent, pNode, disp ) )
977 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
980 return update_flags::retry;
982 pDamaged = fix_height_locked( pParent );
985 func( pOld ); // calls pOld disposer inside
986 m_stat.onDisposeNode();
988 fix_height_and_rebalance( pDamaged, disp );
989 return update_flags::result_removed;
992 int result = update_flags::retry;
995 node_scoped_lock ln( m_Monitor, *pNode );
996 pOld = pNode->value( atomics::memory_order_relaxed );
997 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
998 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
999 result = update_flags::result_removed;
1003 if ( result == update_flags::result_removed ) {
1004 func( pOld ); // calls pOld disposer inside
1005 m_stat.onDisposeNode();
1012 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1014 // pParent and pNode must be locked
1015 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1017 node_type * pParentLeft = child( pParent, -1, memory_model::memory_order_relaxed );
1018 node_type * pParentRight = child( pParent, 1, memory_model::memory_order_relaxed );
1019 if ( pNode != pParentLeft && pNode != pParentRight ) {
1020 // node is no longer a child of parent
1024 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1025 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1027 node_type * pLeft = child( pNode, -1, memory_model::memory_order_relaxed );
1028 node_type * pRight = child( pNode, 1, memory_model::memory_order_relaxed );
1029 if ( pLeft != nullptr && pRight != nullptr ) {
1030 // splicing is no longer possible
1033 node_type * pSplice = pLeft ? pLeft : pRight;
1035 if ( pParentLeft == pNode )
1036 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1038 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1041 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1043 // Mark the node as unlinked
1044 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1045 disp.dispose( pNode );
1052 private: // rotations
1054 int estimate_node_condition( node_type * pNode )
1056 node_type * pLeft = child( pNode, -1, memory_model::memory_order_relaxed );
1057 node_type * pRight = child( pNode, 1, memory_model::memory_order_relaxed );
1059 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1060 return unlink_required;
1062 int h = pNode->height( memory_model::memory_order_relaxed );
1063 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1064 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1066 int hNew = 1 + std::max( hL, hR );
1067 int nBalance = hL - hR;
1069 if ( nBalance < -1 || nBalance > 1 )
1070 return rebalance_required;
1072 return h != hNew ? hNew : nothing_required;
1075 node_type * fix_height( node_type * pNode )
1077 assert( pNode != nullptr );
1078 node_scoped_lock l( m_Monitor, *pNode );
1079 return fix_height_locked( pNode );
1082 node_type * fix_height_locked( node_type * pNode )
1084 // pNode must be locked!!!
1085 int h = estimate_node_condition( pNode );
1087 case rebalance_required:
1088 case unlink_required:
1090 case nothing_required:
1093 pNode->height( h, memory_model::memory_order_relaxed );
1094 return parent( pNode, memory_model::memory_order_relaxed );
1098 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1100 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed )) {
1101 int nCond = estimate_node_condition( pNode );
1102 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1105 if ( nCond != unlink_required && nCond != rebalance_required )
1106 pNode = fix_height( pNode );
1108 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1109 assert( pParent != nullptr );
1111 node_scoped_lock lp( m_Monitor, *pParent );
1112 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1113 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1115 node_scoped_lock ln( m_Monitor, *pNode );
1116 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs a single node whose parent and itself are locked by the caller:
// splices out unnecessary routing nodes, dispatches to the left/right
// rebalancing helpers when the AVL balance bound is violated, or simply
// refreshes the cached height.
1123 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1125 // pParent and pNode should be locked.
1126 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1127 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1128 assert( child( pParent, -1, memory_model::memory_order_relaxed ) == pNode
1129 || child( pParent, 1, memory_model::memory_order_relaxed ) == pNode );
// child( node, -1, ... ) is the left child, child( node, 1, ... ) the right
1131 node_type * pLeft = child( pNode, -1, memory_model::memory_order_relaxed );
1132 node_type * pRight = child( pNode, 1, memory_model::memory_order_relaxed );
// A node with a null value and at most one child is an unnecessary
// routing node: try to splice it out and repair the parent's height.
1134 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1135 if ( try_unlink_locked( pParent, pNode, disp ))
1136 return fix_height_locked( pParent );
1138 // retry needed for pNode
// recompute the balance factor from the children's cached heights
1143 int h = pNode->height( memory_model::memory_order_relaxed );
1144 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1145 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1146 int hNew = 1 + std::max( hL, hR );
1147 int balance = hL - hR;
// left subtree too tall: rotate toward the right
1150 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
// right subtree too tall: rotate toward the left
1151 else if ( balance < -1 )
1152 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
// balanced, but the cached height was stale: store the new height and
// propagate the repair to the parent
1153 else if ( hNew != h ) {
1154 pNode->height( hNew, memory_model::memory_order_relaxed );
1156 // pParent is already locked
1157 return fix_height_locked( pParent );
// pNode is left-heavy beyond the AVL bound. Depending on the shape of
// pLeft's subtrees this performs a single rotate-right, a right-over-left
// double rotation, or first recurses to rebalance pLeft itself.
// Returns a node that still needs repair, or pNode when a retry is needed.
1163 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1165 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1166 assert( child( pParent, -1, memory_model::memory_order_relaxed ) == pNode
1167 || child( pParent, 1, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are already locked by the caller
1170 // pNode->pLeft is too large, we will rotate-right.
1171 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1174 assert( pLeft != nullptr );
// lock the child after the parent - consistent lock ordering
1175 node_scoped_lock l( m_Monitor, *pLeft );
// re-validate under pLeft's lock: pLeft must still be pNode's left child
1176 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1177 return pNode; // retry for pNode
// re-read the height under the lock; the imbalance may be gone already
1179 int hL = pLeft->height( memory_model::memory_order_relaxed );
1181 return pNode; // retry
1183 node_type * pLRight = child( pLeft, 1, memory_model::memory_order_relaxed );
1184 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1185 node_type * pLLeft = child( pLeft, -1, memory_model::memory_order_relaxed );
1186 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// a single rotate-right is sufficient in this case
1190 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// pLeft's right subtree dominates: lock pLRight and consider a double rotation
1193 assert( pLRight != nullptr );
1195 node_scoped_lock lr( m_Monitor, *pLRight );
// re-validate: pLRight must still be pLeft's right child
1196 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1197 return pNode; // retry
1199 hLR = pLRight->height( memory_model::memory_order_relaxed );
1201 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1203 node_type * pLRLeft = child( pLRight, -1, memory_model::memory_order_relaxed );
1204 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1205 int balance = hLL - hLRL;
// a double rotation is safe only if it leaves pLeft within the AVL bound
// and does not turn pLeft (null value) into a childless routing node
1206 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
1207 // pParent.child.left won't be damaged after a double rotation
1208 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
// otherwise rebalance pLeft first;
1212 // focus on pLeft, if necessary pNode will be balanced later
1213 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked(): pNode is right-heavy beyond the
// AVL bound. Performs a single rotate-left, a left-over-right double
// rotation, or recurses to rebalance pRight first.
// Returns a node that still needs repair, or pNode when a retry is needed.
1218 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1220 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1221 assert( child( pParent, -1, memory_model::memory_order_relaxed ) == pNode
1222 || child( pParent, 1, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are already locked by the caller
1226 assert( pRight != nullptr );
// lock the child after the parent - consistent lock ordering
1227 node_scoped_lock l( m_Monitor, *pRight );
// re-validate under pRight's lock: pRight must still be pNode's right child
1228 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1229 return pNode; // retry for pNode
// re-check the balance with the height read under the lock
1231 int hR = pRight->height( memory_model::memory_order_relaxed );
1232 if ( hL - hR >= -1 )
1233 return pNode; // retry
1235 node_type * pRLeft = child( pRight, -1, memory_model::memory_order_relaxed );
1236 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1237 node_type * pRRight = child( pRight, 1, memory_model::memory_order_relaxed );
1238 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// a single rotate-left is sufficient in this case
1240 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// pRight's left subtree dominates: lock pRLeft and consider a double rotation
1243 assert( pRLeft != nullptr );
1244 node_scoped_lock lrl( m_Monitor, *pRLeft );
// re-validate: pRLeft must still be pRight's left child
1245 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1246 return pNode; // retry
1248 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1250 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1252 node_type * pRLRight = child( pRLeft, 1, memory_model::memory_order_relaxed );
1253 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1254 int balance = hRR - hRLR;
// a double rotation is safe only if it leaves pRight within the AVL bound
// and does not turn pRight (null value) into a childless routing node
1255 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_model::memory_order_relaxed ) == nullptr))
1256 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// otherwise rebalance pRight first; pNode will be balanced later if needed
1258 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1262 static void begin_change( node_type * pNode, version_type version )
1264 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1266 static void end_change( node_type * pNode, version_type version )
1268 // Clear shrinking and unlinked flags and increment version
1269 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the parent of pNode, and pLeft's
// right subtree (pLRight) becomes pNode's left subtree. pParent, pNode and
// pLeft are locked by the caller (see rebalance_to_right_locked). The
// begin_change/end_change version protocol makes the shrink visible to
// concurrent optimistic readers. Returns a node that still needs repair,
// or the result of fix_height_locked( pParent ).
1272 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1274 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1275 node_type * pParentLeft = child( pParent, -1, memory_model::memory_order_relaxed );
// publish the 'shrinking' flag so readers traversing pNode will retry
1277 begin_change( pNode, nodeVersion );
// relink: pLeft's right subtree becomes pNode's left subtree
1279 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1280 if ( pLRight != nullptr )
1281 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pLeft's right child
1283 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1284 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// hang pLeft under pParent on whichever side pNode occupied
1286 if ( pParentLeft == pNode )
1287 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1289 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1290 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1292 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1294 // fix up the cached heights
1295 int hNode = 1 + std::max( hLR, hR );
1296 pNode->height( hNode, memory_model::memory_order_relaxed );
1297 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
// clear 'shrinking' and bump the version: readers may proceed again
1299 end_change( pNode, nodeVersion );
1301 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1302 // parent.child). pNode is the deepest. Perform as many fixes as we can
1303 // with the locks we've got.
1305 // We've already fixed the height for pNode, but it might still be outside
1306 // our allowable balance range. In that case a simple fix_height_locked()
1308 int nodeBalance = hLR - hR;
1309 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1310 // we need another rotation at pNode
// pNode may have become an unnecessary routing node (null value, <=1 child)
1314 // we've fixed balance and height damage for pNode, now handle
1315 // extra-routing node damage
1316 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
1317 // we need to remove pNode and then repair
1321 // we've already fixed the height at pLeft, do we need a rotation here?
1322 int leftBalance = hLL - hNode;
1323 if ( leftBalance < -1 || leftBalance > 1 )
1326 // pLeft might also have routing node damage (if pLeft.left was null)
1327 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1330 // try to fix the parent height while we've still got the lock
1331 return fix_height_locked( pParent );
// Single left rotation (mirror of rotate_right_locked): pRight becomes the
// parent of pNode, and pRight's left subtree (pRLeft) becomes pNode's right
// subtree. pParent, pNode and pRight are locked by the caller (see
// rebalance_to_left_locked). Returns a node that still needs repair, or the
// result of fix_height_locked( pParent ).
1334 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1336 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1337 node_type * pParentLeft = child( pParent, -1, memory_model::memory_order_relaxed );
// publish the 'shrinking' flag so readers traversing pNode will retry
1339 begin_change( pNode, nodeVersion );
1341 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1342 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1343 if ( pRLeft != nullptr )
1344 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pRight's left child
1346 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1347 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// hang pRight under pParent on whichever side pNode occupied
1349 if ( pParentLeft == pNode )
1350 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1352 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1353 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1355 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up the cached heights
1358 int hNode = 1 + std::max( hL, hRL );
1359 pNode->height( hNode, memory_model::memory_order_relaxed );
1360 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
// clear 'shrinking' and bump the version: readers may proceed again
1362 end_change( pNode, nodeVersion );
// pNode may still be outside the balance bound: another rotation is needed
1364 int nodeBalance = hRL - hL;
1365 if ( nodeBalance < -1 || nodeBalance > 1 )
// pNode may have become an unnecessary routing node (null value, <=1 child)
1368 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
// does pRight (the new subtree root) itself need a rotation?
1371 int rightBalance = hRR - hNode;
1372 if ( rightBalance < -1 || rightBalance > 1 )
// pRight might also have routing node damage (if pRight.right was null)
1375 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
// try to fix the parent height while we've still got the lock
1378 return fix_height_locked( pParent );
// Right-over-left double rotation: pLRight becomes the new subtree root,
// with pLeft as its left child and pNode as its right child. pParent,
// pNode, pLeft and pLRight are locked by the caller (see
// rebalance_to_right_locked). Both pNode and pLeft go through the
// begin_change/end_change version protocol because both have their links
// changed. Returns a node that still needs repair, or the result of
// fix_height_locked( pParent ).
1381 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1383 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1384 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1386 node_type * pPL = child( pParent, -1, memory_model::memory_order_relaxed );
1387 node_type * pLRL = child( pLRight, -1, memory_model::memory_order_relaxed );
1388 node_type * pLRR = child( pLRight, 1, memory_model::memory_order_relaxed );
1389 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
// mark both nodes whose links change as 'shrinking' before relinking
1391 begin_change( pNode, nodeVersion );
1392 begin_change( pLeft, leftVersion );
1394 // fix up pNode links, careful about the order!
1395 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1396 if ( pLRR != nullptr )
1397 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pLRight's left subtree moves under pLeft
1399 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1400 if ( pLRL != nullptr )
1401 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight adopts pLeft (left) and pNode (right)
1403 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1404 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1405 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1406 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// hang pLRight under pParent on whichever side pNode occupied
1409 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1411 assert( child( pParent, 1, memory_model::memory_order_relaxed ) == pNode );
1412 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1414 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up the cached heights bottom-up: pNode, pLeft, then pLRight
1417 int hNode = 1 + std::max( hLRR, hR );
1418 pNode->height( hNode, memory_model::memory_order_relaxed );
1419 int hLeft = 1 + std::max( hLL, hLRL );
1420 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1421 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
// clear 'shrinking' and bump both versions: readers may proceed again
1423 end_change( pNode, nodeVersion );
1424 end_change( pLeft, leftVersion );
1426 // caller should have performed only a single rotation if pLeft was going
1427 // to end up damaged
1428 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1429 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1431 // We have damaged pParent, pLR (now parent.child), and pNode (now
1432 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1433 // can with the locks we've got.
1435 // We've already fixed the height for pNode, but it might still be outside
1436 // our allowable balance range. In that case a simple fix_height_locked()
1438 int nodeBalance = hLRR - hR;
1439 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1440 // we need another rotation at pNode
1444 // pNode might also be damaged by being an unnecessary routing node
1445 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1446 // repair involves splicing out pNode and maybe more rotations
1450 // we've already fixed the height at pLRight, do we need a rotation here?
1451 int balanceLR = hLeft - hNode;
1452 if ( balanceLR < -1 || balanceLR > 1 )
1455 // try to fix the parent height while we've still got the lock
1456 return fix_height_locked( pParent );
// Left-over-right double rotation (mirror of rotate_right_over_left_locked):
// pRLeft becomes the new subtree root, with pNode as its left child and
// pRight as its right child. pParent, pNode, pRight and pRLeft are locked
// by the caller (see rebalance_to_left_locked). Both pNode and pRight go
// through the begin_change/end_change version protocol because both have
// their links changed. Returns a node that still needs repair, or the
// result of fix_height_locked( pParent ).
1459 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1461 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1462 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1464 node_type * pPL = child( pParent, -1, memory_model::memory_order_relaxed );
1465 node_type * pRLL = child( pRLeft, -1, memory_model::memory_order_relaxed );
1466 node_type * pRLR = child( pRLeft, 1, memory_model::memory_order_relaxed );
1467 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
// mark both nodes whose links change as 'shrinking' before relinking
1469 begin_change( pNode, nodeVersion );
1470 begin_change( pRight, rightVersion );
1472 // fix up pNode links, careful about the order!
1473 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1474 if ( pRLL != nullptr )
1475 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pRLeft's right subtree moves under pRight
1477 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1478 if ( pRLR != nullptr )
1479 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft adopts pRight (right) and pNode (left)
1481 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1482 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1483 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1484 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// hang pRLeft under pParent on whichever side pNode occupied
1487 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1489 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1490 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1492 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up the cached heights bottom-up: pNode, pRight, then pRLeft
1495 int hNode = 1 + std::max( hL, hRLL );
1496 pNode->height( hNode, memory_model::memory_order_relaxed );
1497 int hRight = 1 + std::max( hRLR, hRR );
1498 pRight->height( hRight, memory_model::memory_order_relaxed );
1499 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
// clear 'shrinking' and bump both versions: readers may proceed again
1501 end_change( pNode, nodeVersion );
1502 end_change( pRight, rightVersion );
// caller should have performed only a single rotation if pRight was going
// to end up damaged
1504 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// pNode may still be outside the balance bound: another rotation is needed
1506 int nodeBalance = hRLL - hL;
1507 if ( nodeBalance < -1 || nodeBalance > 1 )
// pNode might also be damaged by being an unnecessary routing node
1509 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
// we've already fixed the height at pRLeft, do we need a rotation here?
1512 int balRL = hRight - hNode;
1513 if ( balRL < -1 || balRL > 1 )
// try to fix the parent height while we've still got the lock
1516 return fix_height_locked( pParent );
1521 }} // namespace cds::container
1523 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H