2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
32 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
34 #include <type_traits> // is_base_of
35 #include <cds/container/details/bronson_avltree_base.h>
36 #include <cds/urcu/details/check_deadlock.h>
37 #include <cds/urcu/exempt_ptr.h>
39 namespace cds { namespace container {
41 /// Bronson et al AVL-tree (RCU specialization for pointers)
42 /** @ingroup cds_nonintrusive_map
43 @ingroup cds_nonintrusive_tree
44 @headerfile cds/container/bronson_avltree_map_rcu.h
45 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
47 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
48 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
49 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
50 the disposer functor provided by \p Traits template parameter.
52 <b>Template arguments</b>:
53 - \p RCU - one of \ref cds_urcu_gc "RCU type"
55 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
57 - \p Traits - tree traits, default is \p bronson_avltree::traits
58 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
59 instead of \p Traits template argument.
61 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
62 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
68 # ifdef CDS_DOXYGEN_INVOKED
69 typename Traits = bronson_avltree::traits
74 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
77 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
78 typedef Key key_type; ///< type of a key stored in the map
79 typedef T * mapped_type; ///< type of value stored in the map
80 typedef Traits traits; ///< Traits template parameter
82 # ifdef CDS_DOXYGEN_INVOKED
83 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
85 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
87 typedef typename traits::item_counter item_counter; ///< Item counting policy
88 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
89 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
90 typedef typename traits::stat stat; ///< internal statistics
91 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
92 typedef typename traits::back_off back_off; ///< Back-off strategy
93 typedef typename traits::disposer disposer; ///< Value disposer
94 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
96 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
97 static constexpr bool const c_bRelaxedInsert = traits::relaxed_insert;
99 /// Group of \p extract_xxx functions does not require external locking
100 static constexpr const bool c_bExtractLockExternal = false;
102 # ifdef CDS_DOXYGEN_INVOKED
103 /// Returned pointer to \p mapped_type of extracted node
104 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
106 typedef cds::urcu::exempt_ptr< gc,
107 typename std::remove_pointer<mapped_type>::type,
108 typename std::remove_pointer<mapped_type>::type,
114 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
118 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
119 typedef typename node_type::version_type version_type;
121 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
122 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
124 enum class find_result
141 result_inserted = allow_insert,
142 result_updated = allow_update,
149 nothing_required = -3,
150 rebalance_required = -2,
159 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
164 template <typename K>
165 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
167 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
170 static void free_node( node_type * pNode )
172 // Free node without disposer
173 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
174 assert( pNode->m_SyncMonitorInjection.check_free());
175 cxx_allocator().Delete( pNode );
178 static void free_value( mapped_type pVal )
183 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
185 return pNode->child( nDir, order );
188 static node_type * parent( node_type * pNode, atomics::memory_order order )
190 return pNode->parent( order );
196 node_type * m_pRetiredList; ///< head of retired node list
197 mapped_type m_pRetiredValue; ///< value retired
201 : m_pRetiredList( nullptr )
202 , m_pRetiredValue( nullptr )
210 void dispose( node_type * pNode )
212 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
213 pNode->m_pNextRemoved = m_pRetiredList;
214 m_pRetiredList = pNode;
217 void dispose_value( mapped_type pVal )
219 assert( m_pRetiredValue == nullptr );
220 m_pRetiredValue = pVal;
224 struct internal_disposer
226 void operator()( node_type * p ) const
234 assert( !gc::is_locked());
236 // TODO: use RCU::batch_retire
239 for ( node_type * p = m_pRetiredList; p; ) {
240 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
241 // Value already disposed
242 gc::template retire_ptr<internal_disposer>( p );
247 if ( m_pRetiredValue )
248 gc::template retire_ptr<disposer>( m_pRetiredValue );
256 typename node_type::base_class m_Root;
258 item_counter m_ItemCounter;
259 mutable sync_monitor m_Monitor;
264 /// Creates empty map
266 : m_pRoot( static_cast<node_type *>( &m_Root ))
277 The \p key_type should be constructible from a value of type \p K.
279 RCU \p synchronize() can be called. RCU should not be locked.
281 Returns \p true if inserting successful, \p false otherwise.
283 template <typename K>
284 bool insert( K const& key, mapped_type pVal )
286 return do_update(key, key_comparator(),
287 [pVal]( node_type * pNode ) -> mapped_type
289 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
293 update_flags::allow_insert
294 ) == update_flags::result_inserted;
297 /// Updates the value for \p key
299 The operation performs inserting or updating the value for \p key with lock-free manner.
300 If \p bInsert is \p false, only updating of existing node is possible.
302 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
303 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
304 constructible from type \p K.
305 Otherwise, the value for \p key will be changed to \p pVal.
307 RCU \p synchronize() method can be called. RCU should not be locked.
309 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
310 \p second is \p true if new node has been added or \p false if the node with \p key
313 template <typename K>
314 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
316 int result = do_update( key, key_comparator(),
317 [pVal]( node_type * ) -> mapped_type
321 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
323 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
328 /// Delete \p key from the map
330 RCU \p synchronize() method can be called. RCU should not be locked.
332 Return \p true if \p key is found and deleted, \p false otherwise
334 template <typename K>
335 bool erase( K const& key )
340 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
344 /// Deletes the item from the map using \p pred predicate for searching
346 The function is an analog of \p erase(K const&)
347 but \p pred is used for key comparing.
348 \p Less functor has the interface like \p std::less.
349 \p Less must imply the same element order as the comparator used for building the map.
351 template <typename K, typename Less>
352 bool erase_with( K const& key, Less pred )
357 cds::opt::details::make_comparator_from_less<Less>(),
358 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
362 /// Delete \p key from the map
364 The function searches an item with key \p key, calls \p f functor
365 and deletes the item. If \p key is not found, the functor is not called.
367 The functor \p Func interface:
370 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
374 RCU \p synchronize method can be called. RCU should not be locked.
376 Return \p true if key is found and deleted, \p false otherwise
378 template <typename K, typename Func>
379 bool erase( K const& key, Func f )
384 [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool {
387 disp.dispose_value(pVal);
393 /// Deletes the item from the map using \p pred predicate for searching
395 The function is an analog of \p erase(K const&, Func)
396 but \p pred is used for key comparing.
397 \p Less functor has the interface like \p std::less.
398 \p Less must imply the same element order as the comparator used for building the map.
400 template <typename K, typename Less, typename Func>
401 bool erase_with( K const& key, Less pred, Func f )
406 cds::opt::details::make_comparator_from_less<Less>(),
407 [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool {
410 disp.dispose_value(pVal);
416 /// Extracts a value with minimal key from the map
418 Returns \p exempt_ptr to the leftmost item.
419 If the tree is empty, returns empty \p exempt_ptr.
421 Note that the function returns only the value for minimal key.
422 To retrieve its key use \p extract_min( Func ) member function.
424 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
425 It means that the function gets leftmost leaf of the tree and tries to unlink it.
426 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
427 So, the function returns the item with minimum key at the moment of tree traversing.
429 RCU \p synchronize method can be called. RCU should NOT be locked.
430 The function does not free the item.
431 The deallocator will be implicitly invoked when the returned object is destroyed or when
432 its \p release() member function is called.
434 exempt_ptr extract_min()
436 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
439 /// Extracts minimal key and corresponding value
441 Returns \p exempt_ptr to the leftmost item.
442 If the tree is empty, returns empty \p exempt_ptr.
444 \p Func functor is used to store minimal key.
445 \p Func has the following signature:
448 void operator()( key_type const& key );
451 If the tree is empty, \p f is not called.
452 Otherwise, it is called with minimal key, the pointer to corresponding value is returned
455 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
456 It means that the function gets leftmost leaf of the tree and tries to unlink it.
457 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
458 So, the function returns the item with minimum key at the moment of tree traversing.
460 RCU \p synchronize method can be called. RCU should NOT be locked.
461 The function does not free the item.
462 The deallocator will be implicitly invoked when the returned object is destroyed or when
463 its \p release() member function is called.
465 template <typename Func>
466 exempt_ptr extract_min( Func f )
468 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
471 /// Extracts minimal key and corresponding value
473 This function is a shortcut for the following call:
476 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
478 \p key_type should be copy-assignable. The copy of minimal key
479 is returned in \p min_key argument.
481 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
482 extract_min_key( key_type& min_key )
484 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
487 /// Extracts a value with maximal key from the tree
489 Returns \p exempt_ptr pointer to the rightmost item.
490 If the set is empty, returns empty \p exempt_ptr.
492 Note that the function returns only the value for maximal key.
493 To retrieve its key use \p extract_max( Func ) member function.
495 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
496 It means that the function gets rightmost leaf of the tree and tries to unlink it.
497 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
498 So, the function returns the item with maximum key at the moment of tree traversing.
500 RCU \p synchronize method can be called. RCU should NOT be locked.
501 The function does not free the item.
502 The deallocator will be implicitly invoked when the returned object is destroyed or when
503 its \p release() is called.
505 exempt_ptr extract_max()
507 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
510 /// Extracts the maximal key and corresponding value
512 Returns \p exempt_ptr pointer to the rightmost item.
513 If the set is empty, returns empty \p exempt_ptr.
515 \p Func functor is used to store maximal key.
516 \p Func has the following signature:
519 void operator()( key_type const& key );
522 If the tree is empty, \p f is not called.
523 Otherwise, it is called with maximal key, the pointer to corresponding value is returned
526 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
527 It means that the function gets rightmost leaf of the tree and tries to unlink it.
528 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
529 So, the function returns the item with maximum key at the moment of tree traversing.
531 RCU \p synchronize method can be called. RCU should NOT be locked.
532 The function does not free the item.
533 The deallocator will be implicitly invoked when the returned object is destroyed or when
534 its \p release() is called.
536 template <typename Func>
537 exempt_ptr extract_max( Func f )
539 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
542 /// Extracts the maximal key and corresponding value
544 This function is a shortcut for the following call:
547 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
549 \p key_type should be copy-assignable. The copy of maximal key
550 is returned in \p max_key argument.
552 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
553 extract_max_key( key_type& max_key )
555 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
558 /// Extracts an item from the map
560 The function searches an item with key equal to \p key in the tree,
561 unlinks it, and returns \p exempt_ptr pointer to a value found.
562 If \p key is not found the function returns an empty \p exempt_ptr.
564 RCU \p synchronize method can be called. RCU should NOT be locked.
565 The function does not destroy the value found.
566 The disposer will be implicitly invoked when the returned object is destroyed or when
567 its \p release() member function is called.
569 template <typename Q>
570 exempt_ptr extract( Q const& key )
572 return exempt_ptr(do_extract( key ));
576 /// Extracts an item from the map using \p pred for searching
578 The function is an analog of \p extract(Q const&)
579 but \p pred is used for key compare.
580 \p Less has the interface like \p std::less.
581 \p pred must imply the same element order as the comparator used for building the tree.
583 template <typename Q, typename Less>
584 exempt_ptr extract_with( Q const& key, Less pred )
586 return exempt_ptr(do_extract_with( key, pred ));
589 /// Find the key \p key
591 The function searches the item with key equal to \p key and calls the functor \p f for item found.
592 The interface of \p Func functor is:
595 void operator()( key_type const& key, std::remove_pointer< mapped_type )::type& item );
598 where \p item is the item found.
599 The functor is called under node-level lock.
601 The function applies RCU lock internally.
603 The function returns \p true if \p key is found, \p false otherwise.
605 template <typename K, typename Func>
606 bool find( K const& key, Func f )
608 return do_find( key, key_comparator(),
609 [&f]( node_type * pNode ) -> bool {
610 assert( pNode != nullptr );
611 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
613 f( pNode->m_key, *pVal );
621 /// Finds the key \p val using \p pred predicate for searching
623 The function is an analog of \p find(K const&, Func)
624 but \p pred is used for key comparing.
625 \p Less functor has the interface like \p std::less.
626 \p Less must imply the same element order as the comparator used for building the map.
628 template <typename K, typename Less, typename Func>
629 bool find_with( K const& key, Less pred, Func f )
632 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
633 [&f]( node_type * pNode ) -> bool {
634 assert( pNode != nullptr );
635 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
637 f( pNode->m_key, *pVal );
645 /// Checks whether the map contains \p key
647 The function searches the item with key equal to \p key
648 and returns \p true if it is found, and \p false otherwise.
650 The function applies RCU lock internally.
652 template <typename K>
653 bool contains( K const& key )
655 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
658 /// Checks whether the map contains \p key using \p pred predicate for searching
660 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
661 \p Less functor has the interface like \p std::less.
662 \p Less must imply the same element order as the comparator used for building the set.
664 template <typename K, typename Less>
665 bool contains( K const& key, Less pred )
668 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
671 /// Clears the tree (thread safe, not atomic)
673 The function unlink all items from the tree.
674 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
678 assert( set.empty());
680 the assertion could be raised.
682 For each node the \ref disposer will be called after unlinking.
684 RCU \p synchronize method can be called. RCU should not be locked.
688 while ( extract_min());
691 /// Clears the tree (not thread safe)
693 This function is not thread safe and may be called only when no other thread deals with the tree.
694 The function is used in the tree destructor.
698 clear(); // temp solution
702 /// Checks if the map is empty
705 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
708 /// Returns item count in the map
710 Only leaf nodes containing user data are counted.
712 The value returned depends on item counter type provided by \p Traits template parameter.
713 If it is \p atomicity::empty_item_counter this function always returns 0.
715 The function is not suitable for checking the tree emptiness, use \p empty()
716 member function for this purpose.
720 return m_ItemCounter;
723 /// Returns const reference to internal statistics
724 stat const& statistics() const
729 /// Returns reference to \p sync_monitor object
730 sync_monitor& monitor()
735 sync_monitor const& monitor() const
741 /// Checks internal consistency (not atomic, not thread-safe)
743 The debugging function to check internal consistency of the tree.
745 bool check_consistency() const
747 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
750 /// Checks internal consistency (not atomic, not thread-safe)
752 The debugging function to check internal consistency of the tree.
753 The functor \p Func is called if a violation of internal tree structure
757 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
761 - \p nLevel - the level where the violation is found
762 - \p hLeft - the height of left subtree
763 - \p hRight - the height of right subtree
765 The functor is called for each violation found.
767 template <typename Func>
768 bool check_consistency( Func f ) const
770 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
773 do_check_consistency( pChild, 1, f, nErrors );
781 template <typename Func>
782 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
786 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
787 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
788 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
790 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
793 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
794 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
796 if ( hLeft >= hRight ) {
797 if ( hLeft - hRight > 1 ) {
798 f( nLevel, hLeft, hRight );
804 if ( hRight - hLeft > 1 ) {
805 f( nLevel, hLeft, hRight );
814 template <typename Q, typename Compare, typename Func>
815 bool do_find( Q& key, Compare cmp, Func f ) const
820 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
822 assert( result != find_result::retry );
823 return result == find_result::found;
826 template <typename K, typename Compare, typename Func>
827 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
829 check_deadlock_policy::check();
831 rcu_disposer removed_list;
834 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
838 template <typename K, typename Compare, typename Func>
839 bool do_remove( K const& key, Compare cmp, Func func )
841 // Func must return true if the value was disposed
842 // or false if the value was extracted
844 check_deadlock_policy::check();
846 rcu_disposer removed_list;
849 return try_remove_root( key, cmp, func, removed_list );
853 template <typename Func>
854 mapped_type do_extract_min( Func f )
856 mapped_type pExtracted = nullptr;
859 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
864 template <typename Func>
865 mapped_type do_extract_max( Func f )
867 mapped_type pExtracted = nullptr;
870 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
875 template <typename Func>
876 void do_extract_minmax( int nDir, Func func )
878 check_deadlock_policy::check();
880 rcu_disposer removed_list;
887 // get right child of root
888 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
890 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
891 if ( nChildVersion & node_type::shrinking ) {
892 m_stat.onRemoveRootWaitShrinking();
893 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
894 result = update_flags::retry;
896 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
897 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
900 result = update_flags::retry;
905 if ( result == update_flags::retry )
906 m_stat.onRemoveRetry();
908 m_stat.onExtract( result == update_flags::result_removed );
915 template <typename Q>
916 mapped_type do_extract( Q const& key )
918 mapped_type pExtracted = nullptr;
922 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
924 m_stat.onExtract( pExtracted != nullptr );
928 template <typename Q, typename Less>
929 mapped_type do_extract_with( Q const& key, Less pred )
932 mapped_type pExtracted = nullptr;
935 cds::opt::details::make_comparator_from_less<Less>(),
936 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
938 m_stat.onExtract( pExtracted != nullptr );
945 static int height( node_type * pNode, atomics::memory_order order )
948 return pNode->m_nHeight.load( order );
950 static void set_height( node_type * pNode, int h, atomics::memory_order order )
953 pNode->m_nHeight.store( h, order );
955 static int height_null( node_type * pNode, atomics::memory_order order )
957 return pNode ? height( pNode, order ) : 0;
960 static constexpr int const c_stackSize = 64;
962 template <typename Q, typename Compare, typename Func>
963 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
965 assert( gc::is_locked());
971 version_type nVersion;
975 stack_record stack[c_stackSize];
977 stack[0].pNode = pNode;
978 stack[0].nVersion = nVersion;
979 stack[0].nDir = nDir;
982 pNode = stack[pos].pNode;
983 nVersion = stack[pos].nVersion;
984 nDir = stack[pos].nDir;
987 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
989 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
991 m_stat.onFindRetry();
994 m_stat.onFindFailed();
995 return find_result::not_found;
998 int nCmp = cmp( key, pChild->m_key );
1000 if ( pChild->is_valued( memory_model::memory_order_acquire )) {
1002 node_scoped_lock l( m_Monitor, *pChild );
1003 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
1004 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
1006 m_stat.onFindSuccess();
1007 return find_result::found;
1012 m_stat.onFindRetry();
1016 m_stat.onFindFailed();
1017 return find_result::not_found;
1020 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1021 if ( nChildVersion & node_type::shrinking ) {
1022 m_stat.onFindWaitShrinking();
1023 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1025 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1027 m_stat.onFindRetry();
1031 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1033 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1035 m_stat.onFindRetry();
1040 assert(pos < c_stackSize);
1041 stack[pos].pNode = pChild;
1042 stack[pos].nVersion = nChildVersion;
1043 stack[pos].nDir = nCmp;
1044 break; // child iteration
1047 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1049 m_stat.onFindRetry();
1053 m_stat.onFindRetry();
1056 return find_result::retry;
1059 template <typename K, typename Compare, typename Func>
1060 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1062 assert( gc::is_locked());
1067 // get right child of root
1068 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1070 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1071 if ( nChildVersion & node_type::shrinking ) {
1072 m_stat.onUpdateRootWaitShrinking();
1073 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1074 result = update_flags::retry;
1076 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1077 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1079 result = update_flags::retry;
1082 // the tree is empty
1083 if ( nFlags & update_flags::allow_insert ) {
1084 // insert into tree as right child of the root
1086 node_scoped_lock l( m_Monitor, *m_pRoot );
1087 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1088 result = update_flags::retry;
1092 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1093 mapped_type pVal = funcUpdate( pNew );
1094 assert( pVal != nullptr );
1095 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1097 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1098 set_height( m_pRoot, 2, memory_model::memory_order_release );
1102 m_stat.onInsertSuccess();
1103 return update_flags::result_inserted;
1106 return update_flags::failed;
1109 if ( result != update_flags::retry )
1114 template <typename K, typename Compare, typename Func>
1115 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1117 assert( gc::is_locked());
1122 // get right child of root
1123 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1125 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1126 if ( nChildVersion & node_type::shrinking ) {
1127 m_stat.onRemoveRootWaitShrinking();
1128 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1129 result = update_flags::retry;
1131 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1132 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1135 result = update_flags::retry;
1140 if ( result == update_flags::retry )
1141 m_stat.onRemoveRetry();
1143 m_stat.onRemove( result == update_flags::result_removed );
1144 return result == update_flags::result_removed;
// Inserts or updates the item with the given key under pNode, using an
// explicit stack (depth bounded by c_stackSize) instead of recursion.
// nFlags is a combination of update_flags::allow_insert / allow_update.
// Returns one of update_flags: result_inserted, result_updated, failed,
// or retry (the caller restarts the whole operation on retry).
1149 template <typename K, typename Compare, typename Func>
1150 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1152 assert( gc::is_locked());
1153 assert( nVersion != node_type::unlinked );
// NOTE(review): this shadows the nVersion parameter — presumably a
// member of the local stack_record struct whose declaration lines are
// not visible in this listing; confirm against the full source.
1158 version_type nVersion;
1161 stack_record stack[c_stackSize];
// Seed the descent with the caller-supplied node/version pair.
1163 stack[0].pNode = pNode;
1164 stack[0].nVersion = nVersion;
1166 while ( pos >= 0 ) {
1167 pNode = stack[pos].pNode;
1168 nVersion = stack[pos].nVersion;
1170 int nCmp = cmp( key, pNode->m_key );
// Key matches this node: update its value in place.
1172 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1173 if ( result != update_flags::retry )
1176 m_stat.onUpdateRetry();
// Descend toward the key; nCmp selects the left/right child.
1181 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
// Version changed since we were linked here — our path is stale.
1182 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1184 m_stat.onUpdateRetry();
1188 if ( pChild == nullptr ) {
// Key not present: insert a fresh leaf if the caller allows it.
1190 if ( nFlags & update_flags::allow_insert ) {
1191 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1192 if ( result != update_flags::retry )
1195 m_stat.onUpdateRetry();
1199 return update_flags::failed;
1203 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1204 if ( nChildVersion & node_type::shrinking ) {
// Child is being rotated down — wait for it to settle, then retry.
1205 m_stat.onUpdateWaitShrinking();
1206 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1209 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1210 // this second read is important, because it is protected by nChildVersion
1212 // validate the read that our caller took to get to node
1213 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1215 m_stat.onUpdateRetry();
1218 // At this point we know that the traversal our parent took to get to node is still valid.
1219 // The recursive implementation will validate the traversal from node to
1220 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1221 // This means that we are no longer vulnerable to node shrinks, and we don't need
1222 // to validate node version any more.
// Push the child as the next stack frame and continue the descent.
1224 assert( pos < c_stackSize );
1225 stack[pos].pNode = pChild;
1226 stack[pos].nVersion = nChildVersion;
1227 assert( nChildVersion != node_type::unlinked );
1228 break; // child iteration
1230 m_stat.onUpdateRetry();
// Stack exhausted without resolution — restart from the root.
1234 return update_flags::retry;
// Removes the item with the given key from the subtree rooted at pNode
// (pParent is pNode's parent). Mirrors try_update(): an explicit stack
// replaces recursion, and every descent step is validated against the
// node version captured when the link was read.
// Returns update_flags::result_removed, failed, or retry.
1237 template <typename K, typename Compare, typename Func>
1238 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1240 assert( gc::is_locked());
1241 assert( nVersion != node_type::unlinked );
// Stack-frame fields (full struct declaration not visible in this listing).
1245 node_type * pParent;
1247 version_type nVersion;
1250 stack_record stack[c_stackSize];
// Seed the descent with the caller-supplied triple.
1252 stack[0].pParent = pParent;
1253 stack[0].pNode = pNode;
1254 stack[0].nVersion = nVersion;
1256 while ( pos >= 0 ) {
1257 pParent = stack[pos].pParent;
1258 pNode = stack[pos].pNode;
1259 nVersion = stack[pos].nVersion;
1261 int nCmp = cmp( key, pNode->m_key );
// Key matches this node: try to remove it right here.
1263 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1264 if ( result != update_flags::retry )
1267 m_stat.onRemoveRetry();
// Descend toward the key.
1272 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1273 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1275 m_stat.onRemoveRetry();
// No child in the search direction: the key is not in the tree.
1279 if ( pChild == nullptr )
1280 return update_flags::failed;
1283 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1284 if ( nChildVersion & node_type::shrinking ) {
// Wait out a concurrent height-reducing rotation on the child.
1285 m_stat.onRemoveWaitShrinking();
1286 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1289 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1290 // this second read is important, because it is protected by nChildVersion
1292 // validate the read that our caller took to get to node
1293 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1295 m_stat.onRemoveRetry();
1299 // At this point we know that the traversal our parent took to get to node is still valid.
1300 // The recursive implementation will validate the traversal from node to
1301 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1302 // This means that we are no longer vulnerable to node shrinks, and we don't need
1303 // to validate node version any more.
// Push the child frame and continue downward.
1305 assert( pos < c_stackSize );
1306 stack[pos].pParent = pNode;
1307 stack[pos].pNode = pChild;
1308 stack[pos].nVersion = nChildVersion;
1309 break; // child iteration
1311 m_stat.onRemoveRetry();
// Descent exhausted without resolution — restart the operation.
1314 return update_flags::retry;
// Extracts the item with the minimum (nDir selects direction) or
// maximum key from the subtree rooted at pNode. Walks in the nDir
// direction until the extremal *valued* node is found, then removes it
// via try_remove_node(). Same stack-based, version-validated descent
// as try_update()/try_remove().
// Returns update_flags::result_removed or retry.
1317 template <typename Func>
1318 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1320 assert( gc::is_locked());
1321 assert( nVersion != node_type::unlinked );
// Stack-frame fields (full struct declaration not visible in this listing).
1325 node_type * pParent;
1327 version_type nVersion;
1330 stack_record stack[c_stackSize];
1332 stack[0].pParent = pParent;
1333 stack[0].pNode = pNode;
1334 stack[0].nVersion = nVersion;
1336 while ( pos >= 0 ) {
1337 pParent = stack[pos].pParent;
1338 pNode = stack[pos].pNode;
1339 nVersion = stack[pos].nVersion;
// Step one level further in the extremal direction.
1343 node_type * pChild = child( pNode, iterDir, memory_model::memory_order_acquire );
1344 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1346 m_stat.onRemoveRetry();
// Reached the end of the chain; if this node carries a value it is
// the min/max — remove it. Routing (valueless) nodes are skipped.
1352 if ( pNode->is_valued( memory_model::memory_order_acquire )) {
1353 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1355 if ( result == update_flags::result_removed )
1359 m_stat.onRemoveRetry();
1363 // check right (for min) or left (for max) child node
1365 pChild = child( pNode, iterDir, memory_model::memory_order_acquire );
1368 m_stat.onRemoveRetry();
1374 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1375 if ( nChildVersion & node_type::shrinking ) {
// Wait out a concurrent rotation on the child before retrying.
1376 m_stat.onRemoveWaitShrinking();
1377 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1380 else if ( pChild == child( pNode, iterDir, memory_model::memory_order_acquire )) {
1381 // this second read is important, because it is protected by nChildVersion
1383 // validate the read that our caller took to get to node
1384 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1386 m_stat.onRemoveRetry();
1390 // At this point we know that the traversal our parent took to get to node is still valid.
1391 // The recursive implementation will validate the traversal from node to
1392 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1393 // This means that we are no longer vulnerable to node shrinks, and we don't need
1394 // to validate node version any more.
// Push the child frame and keep walking toward the extreme.
1396 assert( pos < c_stackSize );
1397 stack[pos].pParent = pNode;
1398 stack[pos].pNode = pChild;
1399 stack[pos].nVersion = nChildVersion;
1400 break; // child iteration
1402 m_stat.onRemoveRetry();
1405 return update_flags::retry;
// Links a freshly allocated node with the given key as the nDir child
// of pNode. funcUpdate produces the node's mapped value and must return
// non-null. With c_bRelaxedInsert the node is allocated *before* taking
// the node lock (optimistic; may have to be thrown away), otherwise it
// is allocated under the lock.
// Returns update_flags::result_inserted or retry.
1408 template <typename K, typename Func>
1409 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Helper: fill the new node's value from the user functor.
1413 auto fnCreateNode = [&funcUpdate]( node_type * node ) {
1414 mapped_type pVal = funcUpdate( node );
1415 assert( pVal != nullptr );
1416 node->m_pValue.store( pVal, memory_model::memory_order_release );
// Relaxed path: cheap pre-check before allocating, to avoid wasting
// an allocation when the slot is obviously stale or occupied.
1419 constexpr_if ( c_bRelaxedInsert ) {
1420 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1421 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1423 m_stat.onInsertRetry();
1424 return update_flags::retry;
// Optimistic allocation outside the lock.
1427 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1430 node_type * pDamaged;
1432 assert( pNode != nullptr );
1433 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version unchanged and slot still empty.
1435 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1436 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1438 constexpr_if ( c_bRelaxedInsert ) {
// Undo the optimistic allocation: detach the value so the node can
// be discarded without disposing the user's value twice.
1439 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1440 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1443 m_stat.onRelaxedInsertFailed();
1446 m_stat.onInsertRetry();
1447 return update_flags::retry;
// Non-relaxed path: allocate only now that the slot is confirmed.
1450 constexpr_if ( !c_bRelaxedInsert )
1451 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Publish the new child, then record any height damage to repair.
1453 pNode->child( pNew, nDir, memory_model::memory_order_release );
1454 pDamaged = fix_height_locked( pNode );
1458 m_stat.onInsertSuccess();
// Rebalance outside the node lock scope.
1461 fix_height_and_rebalance( pDamaged, disp );
1462 m_stat.onInsertRebalanceRequired();
1465 return update_flags::result_inserted;
// Updates the value of an existing node in place, under the node lock.
// nFlags controls whether overwriting an existing value is allowed.
// The old value (if any) is handed to the RCU disposer.
// Returns result_inserted (node was a valueless routing node),
// result_updated, failed, or retry.
1468 template <typename Func>
1469 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1473 assert( pNode != nullptr );
1475 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: our path to this node must still hold.
1477 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1478 return update_flags::retry;
1480 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
1481 m_stat.onUpdateUnlinked();
1482 return update_flags::retry;
// Node already holds a value but the caller did not allow update.
1485 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update)) {
1486 m_stat.onInsertFailed();
1487 return update_flags::failed;
// pOld == nullptr means this was a routing node, so logically this is
// an insert rather than an update.
1490 pOld = pNode->value( memory_model::memory_order_relaxed );
1491 bInserted = pOld == nullptr;
1492 mapped_type pVal = funcUpdate( pNode );
1496 assert( pVal != nullptr );
1497 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
// Defer destruction of the replaced value to RCU reclamation.
1502 disp.dispose_value(pOld);
1503 m_stat.onDisposeValue();
1508 m_stat.onInsertSuccess();
1509 return update_flags::result_inserted;
1512 m_stat.onUpdateSuccess();
1513 return update_flags::result_updated;
// Removes the value of pNode. Two cases:
//  * pNode has at most one child: physically unlink it (lock parent,
//    then node, splice out, fix heights, rebalance);
//  * pNode has two children: just null its value, turning it into a
//    routing node that a later rebalance may splice out.
// func(key, pOld, disp) consumes the old value (disposes or extracts).
// Returns result_removed, failed, or retry.
1516 template <typename Func>
1517 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1519 assert( pParent != nullptr );
1520 assert( pNode != nullptr );
// A valueless node is a routing node — nothing to remove.
1522 if ( !pNode->is_valued( memory_model::memory_order_acquire ))
1523 return update_flags::failed;
1525 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1526 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1528 // pNode can be replaced with its child
1530 node_type * pDamaged;
// Lock order: parent first, then node.
1533 node_scoped_lock lp( m_Monitor, *pParent );
1534 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1535 return update_flags::retry;
1538 node_scoped_lock ln( m_Monitor, *pNode );
1539 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1540 return update_flags::retry;
1542 pOld = pNode->value( memory_model::memory_order_relaxed );
1544 return update_flags::failed;
// Splice the node out; on structural conflict fall back to retry.
1546 if ( !try_unlink_locked( pParent, pNode, disp ))
1547 return update_flags::retry;
1549 pDamaged = fix_height_locked( pParent );
// func decides the old value's fate: true = disposed, false = extracted.
1553 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1554 m_stat.onDisposeValue();
1556 m_stat.onExtractValue();
// Repair any height/balance damage recorded above.
1559 fix_height_and_rebalance( pDamaged, disp );
1560 m_stat.onRemoveRebalanceRequired();
1564 // pNode is an internal with two children
1568 node_scoped_lock ln( m_Monitor, *pNode );
1569 pOld = pNode->value( memory_model::memory_order_relaxed );
1570 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1571 return update_flags::retry;
1573 return update_flags::failed;
// Logical removal only: keep the node as a routing node.
1575 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1576 m_stat.onMakeRoutingNode();
1580 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1581 m_stat.onDisposeValue();
1583 m_stat.onExtractValue();
1585 return update_flags::result_removed;
// Physically splices pNode out of the tree, replacing it in pParent by
// its single child (or nullptr). Both pParent and pNode must already be
// locked by the caller. Fails (returns false) if the parent/child
// relationship changed or pNode grew a second child in the meantime.
// The unlinked node is handed to the RCU disposer.
1588 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1590 // pParent and pNode must be locked
1591 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed));
1593 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1594 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1595 if ( pNode != pParentLeft && pNode != pParentRight ) {
1596 // node is no longer a child of parent
1600 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1601 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
1603 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1604 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Splicing requires at most one child.
1605 if ( pLeft != nullptr && pRight != nullptr ) {
1606 // splicing is no longer possible
// pSplice is the surviving child (possibly nullptr via pRight).
1609 node_type * pSplice = pLeft ? pLeft : pRight;
1611 if ( pParentLeft == pNode )
1612 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1614 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
1617 pSplice->parent( pParent, memory_model::memory_order_release );
1619 // Mark the node as unlinked
1620 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1622 // The value will be disposed by calling function
1623 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
// Node memory is reclaimed after the RCU grace period.
1625 disp.dispose( pNode );
1626 m_stat.onDisposeNode();
1633 private: // rotations
// Debug helper: compares parent/child keys with the tree's comparator.
// Used only inside assert()s to verify BST ordering after rotations.
1635 int check_node_ordering( node_type* pParent, node_type* pChild )
1637 return key_comparator()( pParent->m_key, pChild->m_key );
// Classifies what repair (if any) pNode needs:
//  * unlink_required    — valueless routing node with < 2 children;
//  * rebalance_required — AVL balance factor outside [-1, +1];
//  * a positive height  — stored height is stale, return the new one;
//  * nothing_required   — node is fine.
1640 int estimate_node_condition( node_type * pNode )
1642 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1643 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
// A routing node that lost a child can be spliced out entirely.
1645 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1646 return unlink_required;
1648 int h = height( pNode, memory_model::memory_order_acquire );
1649 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1650 int hR = height_null( pRight, memory_model::memory_order_acquire );
1652 int hNew = 1 + std::max( hL, hR );
1653 int nBalance = hL - hR;
1655 if ( nBalance < -1 || nBalance > 1 )
1656 return rebalance_required;
// hNew > 0 here, so it never collides with the enum codes above.
1658 return h != hNew ? hNew : nothing_required;
// Locks pNode and repairs its stored height.
// Returns the next damaged node to process (see fix_height_locked).
1661 node_type * fix_height( node_type * pNode )
1663 assert( pNode != nullptr );
1664 node_scoped_lock l( m_Monitor, *pNode );
1665 return fix_height_locked( pNode );
// Repairs pNode's stored height; pNode must already be locked.
// If the node needs rebalancing/unlinking instead, or nothing at all,
// the (elided) switch cases handle those outcomes; when only the height
// was stale it is updated and the parent is returned as the next node
// whose height may now be damaged.
1668 node_type * fix_height_locked( node_type * pNode )
1670 // pNode must be locked!!!
1671 int h = estimate_node_condition( pNode );
1673 case rebalance_required:
1674 case unlink_required:
1676 case nothing_required:
// Default case: h is the freshly computed height — store it and
// propagate the damage upward.
1679 set_height( pNode, h, memory_model::memory_order_release );
1680 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from pNode repairing heights and performing rotations until
// no more damage remains or the (pseudo-)root is reached. Each step
// either fixes a stale height (cheap, single lock) or, for real
// rebalancing/unlinking, locks parent+node and delegates to
// rebalance_locked(), which returns the next damaged node.
1684 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
// Stop at the pseudo-root: it has no parent to damage.
1686 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
1687 int nCond = estimate_node_condition( pNode );
1688 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ))
// Height-only damage: fix it and move to the returned parent.
1691 if ( nCond != unlink_required && nCond != rebalance_required )
1692 pNode = fix_height( pNode );
1694 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1695 assert( pParent != nullptr );
// Lock order: parent before node, re-validating the link under lock.
1697 node_scoped_lock lp( m_Monitor, *pParent );
1698 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1699 node_scoped_lock ln( m_Monitor, *pNode );
1700 pNode = rebalance_locked( pParent, pNode, disp );
// Performs one repair step on pNode while holding both pParent's and
// pNode's locks: splice out an empty routing node, rotate right/left
// when the AVL balance is off, or just refresh a stale height.
// Returns the next damaged node (nullptr when done).
1707 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1709 // pParent and pNode should be locked.
1710 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1711 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1713 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1714 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Case 1: valueless routing node with < 2 children — unlink it.
1716 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1717 if ( try_unlink_locked( pParent, pNode, disp ))
1718 return fix_height_locked( pParent );
1720 // retry needed for pNode
1725 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1726 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1728 int h = height( pNode, memory_model::memory_order_acquire );
1729 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1730 int hR = height_null( pRight, memory_model::memory_order_acquire );
1731 int hNew = 1 + std::max( hL, hR );
1732 int balance = hL - hR;
// Case 2: left-heavy (balance > 1, condition elided in this listing).
1735 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
// Case 3: right-heavy.
1736 else if ( balance < -1 )
1737 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
// Case 4: height is merely stale — refresh and push damage upward.
1738 else if ( hNew != h ) {
1739 set_height( pNode, hNew, memory_model::memory_order_release );
1741 // pParent is already locked
1742 return fix_height_locked( pParent );
// Handles a left-heavy pNode: locks pLeft and decides between a single
// right rotation, a double (left-right) rotation, or recursing the
// imbalance down into pLeft first. Returns the next node to retry /
// repair, or nullptr when rebalancing is complete.
1748 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1750 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1751 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1752 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1754 // pParent and pNode is locked yet
1755 // pNode->pLeft is too large, we will rotate-right.
1756 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1758 assert( pLeft != nullptr );
1759 node_scoped_lock l( m_Monitor, *pLeft );
// The left child changed before we could lock it — retry pNode.
1760 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1761 return pNode; // retry for pNode
1763 assert( check_node_ordering( pNode, pLeft ) > 0 );
// Re-read the height under the lock; if the imbalance evaporated
// (condition elided), the caller just retries.
1765 int hL = height( pLeft, memory_model::memory_order_acquire );
1767 return pNode; // retry
1769 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1770 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1771 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1772 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
// Inner grandchild is involved: lock it too (elided condition above).
1776 node_scoped_lock lr( m_Monitor, *pLRight );
1777 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1778 return pNode; // retry
1780 assert( check_node_ordering( pLeft, pLRight ) < 0 );
1782 hLR = height( pLRight, memory_model::memory_order_acquire );
// Outer subtree tall enough: plain single right rotation.
1784 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1786 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire );
1787 int balance = hLL - hLRL;
1788 if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed ))) {
1789 // nParent.child.left won't be damaged after a double rotation
1790 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1794 // focus on pLeft, if necessary pNode will be balanced later
1795 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1797 else if ( hLL > hLR ) {
1799 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1802 return pNode; // retry
// Mirror of rebalance_to_right_locked(): handles a right-heavy pNode
// via a single left rotation, a double (right-left) rotation, or by
// first rebalancing pRight. Returns the next node to retry / repair,
// or nullptr when done.
1805 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1807 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1808 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1809 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1811 // pParent and pNode is locked yet
1812 assert( pRight != nullptr );
1813 node_scoped_lock l( m_Monitor, *pRight );
// The right child changed before we locked it — retry pNode.
1814 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1815 return pNode; // retry for pNode
1817 assert( check_node_ordering( pNode, pRight ) < 0 );
// Re-check the imbalance under the lock; bail out if it is gone.
1819 int hR = height( pRight, memory_model::memory_order_acquire );
1820 if ( hL - hR >= -1 )
1821 return pNode; // retry
1823 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1824 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1825 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1826 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
// Inner grandchild involved: lock it and re-validate (elided cond.).
1830 node_scoped_lock lrl( m_Monitor, *pRLeft );
1831 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1832 return pNode; // retry
1834 assert( check_node_ordering( pRight, pRLeft ) > 0 );
1836 hRL = height( pRLeft, memory_model::memory_order_acquire );
// Outer subtree tall enough: plain single left rotation.
1838 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1840 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1841 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1842 int balance = hRR - hRLR;
1843 if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1844 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise push the imbalance down into pRight first.
1847 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1849 else if ( hRR > hRL )
1850 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1852 return pNode; // retry
// Marks pNode as "shrinking" by OR-ing the shrinking flag into its
// version. Readers that observe this flag wait (see
// wait_until_shrink_completed) until end_change() publishes the new
// version. Must be called under the node's lock with its current
// version.
1855 static void begin_change( node_type * pNode, version_type version )
1857 assert(pNode->version(memory_model::memory_order_acquire) == version );
1858 assert( (version & node_type::shrinking) == 0 );
1859 pNode->exchange_version( version | node_type::shrinking, memory_model::memory_order_acquire );
// Completes a shrink started by begin_change(): clears the flag bits
// and bumps the version counter so that any traversal that read the
// old version will detect the change and retry.
1861 static void end_change( node_type * pNode, version_type version )
1863 // Clear shrinking and unlinked flags and increment version
1864 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the subtree root, pNode becomes
// its right child, and pLeft's former right subtree (pLRight) becomes
// pNode's left subtree. pParent, pNode, pLeft are locked by the caller.
// The node being lowered (pNode) is version-marked via begin_change/
// end_change so concurrent readers retry. Fences + TSan annotations
// order the pointer re-wiring for lock-free readers.
// Returns the next damaged node to repair, or the result of fixing
// pParent's height.
1867 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1869 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1870 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish the shrinking flag before touching any links.
1872 begin_change( pNode, nodeVersion );
// Step 1: pNode adopts pLRight as its left child.
1874 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1876 if ( pLRight != nullptr ) {
1877 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1878 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent );
1879 pLRight->parent( pNode, memory_model::memory_order_relaxed );
1880 assert( check_node_ordering( pNode, pLRight ) > 0 );
// Step 2: pLeft adopts pNode as its right child.
1883 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1884 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight );
1885 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1887 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1889 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
1890 pNode->parent( pLeft, memory_model::memory_order_relaxed );
1891 assert( check_node_ordering( pLeft, pNode ) < 0 );
// Step 3: pParent's child link switches from pNode to pLeft.
1893 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1894 if ( pParentLeft == pNode ) {
1895 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
1896 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1899 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1900 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
1901 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1904 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1905 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent );
1906 pLeft->parent( pParent, memory_model::memory_order_relaxed );
1908 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1910 // fix up heights links
1911 int hNode = 1 + std::max( hLR, hR );
1912 set_height( pNode, hNode, memory_model::memory_order_release );
1913 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
// Publish the new version: readers of the old version now retry.
1915 end_change( pNode, nodeVersion );
1916 m_stat.onRotateRight();
1918 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1919 // parent.child). pNode is the deepest. Perform as many fixes as we can
1920 // with the locks we've got.
1922 // We've already fixed the height for pNode, but it might still be outside
1923 // our allowable balance range. In that case a simple fix_height_locked()
1925 int nodeBalance = hLR - hR;
1926 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1927 // we need another rotation at pNode
1928 m_stat.onRotateAfterRightRotation();
1932 // we've fixed balance and height damage for pNode, now handle
1933 // extra-routing node damage
1934 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1935 // we need to remove pNode and then repair
1936 m_stat.onRemoveAfterRightRotation();
1940 // we've already fixed the height at pLeft, do we need a rotation here?
1941 int leftBalance = hLL - hNode;
1942 if ( leftBalance < -1 || leftBalance > 1 ) {
1943 m_stat.onRotateAfterRightRotation();
1947 // pLeft might also have routing node damage (if pLeft.left was null)
1948 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire)) {
1949 m_stat.onDamageAfterRightRotation();
1953 // try to fix the parent height while we've still got the lock
1954 return fix_height_locked( pParent );
// Single left rotation — exact mirror of rotate_right_locked():
// pRight becomes the subtree root, pNode its left child, and pRight's
// former left subtree (pRLeft) becomes pNode's right subtree.
// pParent, pNode, pRight are locked by the caller; pNode is version-
// marked via begin_change/end_change so concurrent readers retry.
// Returns the next damaged node, or the result of fixing pParent.
1957 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1959 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1960 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1962 begin_change( pNode, nodeVersion );
1964 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
// Step 1: pNode adopts pRLeft as its right child.
1965 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1966 if ( pRLeft != nullptr ) {
1967 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1968 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent );
1969 pRLeft->parent( pNode, memory_model::memory_order_relaxed );
// Step 2: pRight adopts pNode as its left child.
1972 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1973 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft );
1974 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1976 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1977 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
1978 pNode->parent( pRight, memory_model::memory_order_relaxed );
1979 assert( check_node_ordering( pRight, pNode ) > 0 );
// Step 3: pParent's child link switches from pNode to pRight.
1981 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1982 if ( pParentLeft == pNode ) {
1983 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
1984 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1987 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1988 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
1989 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1992 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
1993 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent );
1994 pRight->parent( pParent, memory_model::memory_order_relaxed );
1996 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// Recompute heights for the two repositioned nodes.
1999 int hNode = 1 + std::max( hL, hRL );
2000 set_height( pNode, hNode, memory_model::memory_order_release );
2001 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
// Publish the new version: readers of the old version now retry.
2003 end_change( pNode, nodeVersion );
2004 m_stat.onRotateLeft();
// Post-rotation damage checks (mirror of the right-rotation case).
2006 int nodeBalance = hRL - hL;
2007 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2008 m_stat.onRotateAfterLeftRotation();
// pNode may have become a redundant routing node.
2012 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
2013 m_stat.onRemoveAfterLeftRotation();
2017 int rightBalance = hRR - hNode;
2018 if ( rightBalance < -1 || rightBalance > 1 ) {
2019 m_stat.onRotateAfterLeftRotation();
2023 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire)) {
2024 m_stat.onDamageAfterLeftRotation();
2028 return fix_height_locked( pParent );
// Double (left-right) rotation: pLRight — the right child of pNode's
// left child — is hoisted to the subtree root, with pLeft as its left
// child and pNode as its right child. Both lowered nodes (pNode and
// pLeft) are version-marked with begin_change/end_change so concurrent
// readers retry. pParent, pNode, pLeft, pLRight are locked by the
// caller. Returns the next damaged node, or the result of fixing
// pParent's height.
2031 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
2033 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2034 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
2036 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2037 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
2038 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
2039 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
// Both pNode and pLeft shrink — mark both before re-wiring.
2041 begin_change( pNode, nodeVersion );
2042 begin_change( pLeft, leftVersion );
2044 // fix up pNode links, careful about the order!
// Step 1: pLRight's right subtree moves under pNode.
2045 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
2046 if ( pLRR != nullptr ) {
2047 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2048 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRR->m_pParent );
2049 pLRR->parent( pNode, memory_model::memory_order_relaxed );
// Step 2: pLRight's left subtree moves under pLeft.
2052 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2053 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight );
2054 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
2056 if ( pLRL != nullptr ) {
2057 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2058 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRL->m_pParent );
2059 pLRL->parent( pLeft, memory_model::memory_order_relaxed );
// Step 3: pLRight adopts pLeft (left) and pNode (right).
2062 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2063 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pLeft );
2064 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
2066 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2067 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent );
2068 pLeft->parent( pLRight, memory_model::memory_order_relaxed );
2069 assert( check_node_ordering( pLRight, pLeft ) > 0 );
2071 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2072 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pRight );
2073 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
2075 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2076 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
2077 pNode->parent( pLRight, memory_model::memory_order_relaxed );
2078 assert( check_node_ordering( pLRight, pNode ) < 0 );
// Step 4: pParent's child link switches from pNode to pLRight.
2080 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2081 if ( pPL == pNode ) {
2082 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
2083 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
2086 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
2087 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
2088 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
2091 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2092 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent );
2093 pLRight->parent( pParent, memory_model::memory_order_relaxed );
2095 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// Recompute heights bottom-up: pNode, pLeft, then the new root pLRight.
2098 int hNode = 1 + std::max( hLRR, hR );
2099 set_height( pNode, hNode, memory_model::memory_order_release );
2100 int hLeft = 1 + std::max( hLL, hLRL );
2101 set_height( pLeft, hLeft, memory_model::memory_order_release );
2102 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
// Publish both new versions; stalled readers now retry.
2104 end_change( pNode, nodeVersion );
2105 end_change( pLeft, leftVersion );
2106 m_stat.onRotateRightOverLeft();
2108 // caller should have performed only a single rotation if pLeft was going
2109 // to end up damaged
2110 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2111 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
2113 // We have damaged pParent, pLR (now parent.child), and pNode (now
2114 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2115 // can with the locks we've got.
2117 // We've already fixed the height for pNode, but it might still be outside
2118 // our allowable balance range. In that case a simple fix_height_locked()
2120 int nodeBalance = hLRR - hR;
2121 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2122 // we need another rotation at pNode
2123 m_stat.onRotateAfterRLRotation();
2127 // pNode might also be damaged by being an unnecessary routing node
2128 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2129 // repair involves splicing out pNode and maybe more rotations
2130 m_stat.onRemoveAfterRLRotation();
2134 // we've already fixed the height at pLRight, do we need a rotation here?
2135 int balanceLR = hLeft - hNode;
2136 if ( balanceLR < -1 || balanceLR > 1 ) {
2137 m_stat.onRotateAfterRLRotation();
2141 // try to fix the parent height while we've still got the lock
2142 return fix_height_locked( pParent );
// Double rotation for the left-over-right case (mirror of
// rotate_right_over_left_locked above): pNode's right child pRight is heavy
// on its left side, so pRight's left child (pRLeft) is rotated up two levels
// into pNode's place.  Afterwards pNode is pRLeft's left child, pRight its
// right child, and pRLeft hangs under pParent.
// hL / hRR / hRLR are the caller-observed heights of pNode's left subtree,
// pRight's right subtree and pRLeft's right subtree, respectively.
// NOTE(review): the caller presumably holds the node locks required by the
// Bronson algorithm (at least pParent, pNode, pRight) -- confirm at call sites.
// NOTE(review): brace-only and a few elided lines (including the early
// `return` statements of the damage-check branches and the closing brace)
// are missing from this copy of the file; code kept byte-identical.
2145 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
// snapshot the version words so concurrent optimistic readers can detect
// the structural change bracketed by begin_change/end_change below
2147 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2148 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2150 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2151 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2152 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2153 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
// open the version-change window on both nodes being rewired
2155 begin_change( pNode, nodeVersion );
2156 begin_change( pRight, rightVersion );
2158 // fix up pNode links, careful about the order!
// pRLeft's left subtree moves under pNode...
2159 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2160 if ( pRLL != nullptr ) {
2161 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2162 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLL->m_pParent );
2163 pRLL->parent( pNode, memory_model::memory_order_relaxed );
// ...and pRLeft's right subtree moves under pRight
2166 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2167 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft );
2168 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
2170 if ( pRLR != nullptr ) {
2171 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2172 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLR->m_pParent );
2173 pRLR->parent( pRight, memory_model::memory_order_relaxed );
// hoist pRLeft: pRight becomes its right child, pNode its left child;
// each relaxed store is paired with an acq_rel fence + TSan annotation
2176 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2177 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pRight );
2178 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
2180 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2181 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent );
2182 pRight->parent( pRLeft, memory_model::memory_order_relaxed );
2183 assert( check_node_ordering( pRLeft, pRight ) < 0 );
2185 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2186 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pLeft );
2187 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
2189 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2190 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
2191 pNode->parent( pRLeft, memory_model::memory_order_relaxed );
2192 assert( check_node_ordering( pRLeft, pNode ) > 0 );
// splice pRLeft into whichever child slot of pParent held pNode
2194 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2195 if ( pPL == pNode ) {
2196 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
2197 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
// (else branch: pNode was pParent's right child)
2200 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2201 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
2202 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
2205 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
2206 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent );
2207 pRLeft->parent( pParent, memory_model::memory_order_relaxed );
2209 atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// recompute heights bottom-up: the two rewired children first, then pRLeft
2212 int hNode = 1 + std::max( hL, hRLL );
2213 set_height( pNode, hNode, memory_model::memory_order_release );
2214 int hRight = 1 + std::max( hRLR, hRR );
2215 set_height( pRight, hRight, memory_model::memory_order_release );
2216 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
// close the version-change window opened by begin_change
2218 end_change( pNode, nodeVersion );
2219 end_change( pRight, rightVersion );
2220 m_stat.onRotateLeftOverRight();
// mirror of the "only a single rotation if damage was coming" check
// performed in the right-over-left case above
2222 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// pNode's height is fixed, but it may still be outside the balance range...
2224 int nodeBalance = hRLL - hL;
2225 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2226 m_stat.onRotateAfterLRRotation();
// ...or it may be a value-less routing node that should be spliced out
2230 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
2231 m_stat.onRemoveAfterLRRotation();
// does the hoisted pRLeft itself now need a rotation?
2235 int balRL = hRight - hNode;
2236 if ( balRL < -1 || balRL > 1 ) {
2237 m_stat.onRotateAfterLRRotation();
// otherwise just try to repair the parent height while it is still locked
2241 return fix_height_locked( pParent );
2246 }} // namespace cds::container
2248 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H