3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <memory> // unique_ptr
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
10 namespace cds { namespace container {
12 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
13 /** @ingroup cds_nonintrusive_map
14 @ingroup cds_nonintrusive_tree
15 @headerfile cds/container/bronson_avltree_map_rcu.h
17 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
18 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
19 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
20 the disposer functor provided by \p Traits template parameter.
22 <b>Template arguments</b>:
23 - \p RCU - one of \ref cds_urcu_gc "RCU type"
25 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
27 - \p Traits - tree traits, default is \p bronson_avltree::traits
28 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
29 instead of \p Traits template argument.
31 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
32 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
// Class head and trait-derived typedefs (fragment; many original lines are
// missing from this chunk). The map stores T* values; key ordering comes from
// Traits::compare / Traits::less via make_comparator.
38 # ifdef CDS_DOXYGEN_INVOKED
39 typename Traits = bronson_avltree::traits
44 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
47 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
48 typedef Key key_type; ///< type of a key stored in the map
49 typedef T * mapped_type; ///< type of value stored in the map
50 typedef Traits traits; ///< Traits template parameter
52 # ifdef CDS_DOXYGEN_INVOKED
53 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
55 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
57 typedef typename traits::item_counter item_counter; ///< Item counting policy
58 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
59 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
60 typedef typename traits::stat stat; ///< internal statistics
61 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
62 typedef typename traits::back_off back_off; ///< Back-off strategy
63 typedef typename traits::disposer disposer; ///< Value disposer
64 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
66 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
67 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
69 /// Returned pointer to \p mapped_type of extracted node
70 typedef std::unique_ptr< T, disposer > unique_ptr;
72 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
// Internal node type and helpers (protected section in the full file).
76 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
77 typedef typename node_type::version_type version_type;
79 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
80 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// find_result / update_flags / node-condition enums (partially visible here):
// update_flags combines allow_insert/allow_update bits with result codes;
// nothing_required/rebalance_required are sentinels of estimate_node_condition.
82 enum class find_result
99 result_inserted = allow_insert,
100 result_updated = allow_update,
107 nothing_required = -3,
108 rebalance_required = -2,
// Per-node lock acquired through the sync_monitor.
117 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// Allocates and constructs an internal tree node via the node allocator,
// perfectly forwarding the key; height, version and the three links
// initialize the node's AVL bookkeeping.
122 template <typename K>
123 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
125 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Deallocates the node structure only; the stored value (if any) is NOT
// disposed here — value disposal is handled separately via free_value().
128 static void free_node( node_type * pNode )
130 // Free node without disposer
131 cxx_allocator().Delete( pNode );
// Disposes a user value through traits::disposer
// (body not fully visible in this chunk).
134 static void free_value( mapped_type pVal )
// Atomic read of the child link in direction nDir (left/right)
// with the caller-supplied memory ordering.
139 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
141 return pNode->child( nDir ).load( order );
// Atomic read of the parent link with the caller-supplied ordering.
144 static node_type * parent( node_type * pNode, atomics::memory_order order )
146 return pNode->m_pParent.load( order );
149 // RCU safe disposer for node
// Collects nodes unlinked during one operation; on destruction (partially
// visible below) every collected node is handed to RCU for deferred
// reclamation. Stack-allocated per operation (see do_update/do_remove).
152 node_type * m_pRetiredList; ///< head of retired list
156 : m_pRetiredList( nullptr )
// Links an already-unlinked, valueless node into the retired list;
// actual reclamation is deferred until RCU grace period elapses.
164 void dispose( node_type * pNode )
166 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
167 pNode->m_pNextRemoved = m_pRetiredList;
168 m_pRetiredList = pNode;
// Final node deleter invoked by the RCU reclamation machinery.
172 struct internal_disposer
174 void operator()( node_type * p ) const
// Retire every collected node to RCU; must run outside
// any RCU read-side critical section.
182 assert( !gc::is_locked() );
184 for ( node_type * p = m_pRetiredList; p; ) {
185 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
186 // Value already disposed
187 gc::template retire_ptr<internal_disposer>( p );
// Tree data: embedded sentinel root, item counter, node-level lock monitor.
197 typename node_type::base_class m_Root;
199 item_counter m_ItemCounter;
200 mutable sync_monitor m_Monitor;
205 /// Creates empty map
// m_pRoot aliases the embedded sentinel m_Root; the real tree
// always hangs off the sentinel's right child.
207 : m_pRoot( static_cast<node_type *>( &m_Root ))
218 The \p key_type should be constructible from a value of type \p K.
220 RCU \p synchronize() can be called. RCU should not be locked.
222 Returns \p true if inserting successful, \p false otherwise.
224 template <typename K>
225 bool insert( K const& key, mapped_type pVal )
// allow_insert only: fails if the key already exists. The functor runs for
// the freshly created node and publishes pVal as its value.
227 return do_update(key, key_comparator(),
228 [pVal]( node_type * pNode ) -> mapped_type
// New node must not carry a value yet.
230 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
233 update_flags::allow_insert
234 ) == update_flags::result_inserted;
237 /// Updates the value for \p key
239 The operation performs inserting or updating the value for \p key with lock-free manner.
240 If \p bInsert is \p false, only updating of existing node is possible.
242 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
243 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
244 constructible from type \p K.
245 Otherwise, the value is changed to \p pVal.
247 RCU \p synchronize() method can be called. RCU should not be locked.
249 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
250 \p second is \p true if new node has been added or \p false if the node with \p key
251 already is in the tree.
253 template <typename K, typename Func>
254 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
256 int result = do_update( key, key_comparator(),
257 [pVal]( node_type * ) -> mapped_type
261 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
263 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
266 /// Delete \p key from the map
268 RCU \p synchronize() method can be called. RCU should not be locked.
270 Return \p true if \p key is found and deleted, \p false otherwise
272 template <typename K>
273 bool erase( K const& key )
// The erase family delegates to do_remove(); the lambda returning true tells
// do_remove that the value was disposed in place (not extracted by the caller).
278 []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
282 /// Deletes the item from the map using \p pred predicate for searching
284 The function is an analog of \p erase(K const&)
285 but \p pred is used for key comparing.
286 \p Less functor has the interface like \p std::less.
287 \p Less must imply the same element order as the comparator used for building the map.
289 template <typename K, typename Less>
290 bool erase_with( K const& key, Less pred )
// Same as erase(K) but the comparator is synthesized from the Less predicate.
295 cds::opt::details::make_comparator_from_less<Less>(),
296 []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
300 /// Delete \p key from the map
302 The function searches an item with key \p key, calls \p f functor
303 and deletes the item. If \p key is not found, the functor is not called.
305 The functor \p Func interface:
308 void operator()(mapped_type& item) { ... }
312 RCU \p synchronize method can be called. RCU should not be locked.
314 Return \p true if key is found and deleted, \p false otherwise
316 template <typename K, typename Func>
317 bool erase( K const& key, Func f )
// Functor variant: f observes the value before it is disposed.
322 [&f]( mapped_type pVal ) -> bool {
331 /// Deletes the item from the map using \p pred predicate for searching
333 The function is an analog of \p erase(K const&, Func)
334 but \p pred is used for key comparing.
335 \p Less functor has the interface like \p std::less.
336 \p Less must imply the same element order as the comparator used for building the map.
338 template <typename K, typename Less, typename Func>
339 bool erase_with( K const& key, Less pred, Func f )
344 cds::opt::details::make_comparator_from_less<Less>(),
345 [&f]( mapped_type pVal ) -> bool {
354 /// Extracts an item with minimal key from the map
356 Returns \p std::unique_ptr to the leftmost item.
357 If the set is empty, returns empty \p std::unique_ptr.
359 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
360 It means that the function gets leftmost leaf of the tree and tries to unlink it.
361 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
362 So, the function returns the item with minimum key at the moment of tree traversing.
364 RCU \p synchronize method can be called. RCU should NOT be locked.
365 The function does not free the item.
366 The deallocator will be implicitly invoked when the returned object is destroyed or when
367 its \p reset(nullptr) member function is called.
369 unique_ptr extract_min()
371 unique_ptr pExtracted;
// The lambda returns false: value is extracted (handed to the caller via
// unique_ptr), not disposed by the tree.
375 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
380 /// Extracts an item with maximal key from the map
382 Returns \p std::unique_ptr pointer to the rightmost item.
383 If the set is empty, returns empty \p std::unique_ptr.
385 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
386 It means that the function gets rightmost leaf of the tree and tries to unlink it.
387 During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
388 So, the function returns the item with maximum key at the moment of tree traversing.
390 RCU \p synchronize method can be called. RCU should NOT be locked.
391 The function does not free the item.
392 The deallocator will be implicitly invoked when the returned object is destroyed or when
393 its \p reset(nullptr) is called.
394 @note Before reusing \p result object you should call its \p release() method.
396 unique_ptr extract_max()
398 unique_ptr pExtracted;
402 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
407 /// Extracts an item from the map
409 The function searches an item with key equal to \p key in the tree,
410 unlinks it, and returns \p std::unique_ptr pointer to a value found.
411 If \p key is not found the function returns an empty \p std::unique_ptr.
413 RCU \p synchronize method can be called. RCU should NOT be locked.
414 The function does not destroy the value found.
415 The disposer will be implicitly invoked when the returned object is destroyed or when
416 its \p reset(nullptr) member function is called.
418 template <typename Q>
419 unique_ptr extract( Q const& key )
421 unique_ptr pExtracted;
426 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
431 /// Extracts an item from the map using \p pred for searching
433 The function is an analog of \p extract(Q const&)
434 but \p pred is used for key compare.
435 \p Less has the interface like \p std::less.
436 \p pred must imply the same element order as the comparator used for building the tree.
438 template <typename Q, typename Less>
439 unique_ptr extract_with( Q const& key, Less pred )
442 unique_ptr pExtracted;
446 cds::opt::details::make_comparator_from_less<Less>(),
447 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
452 /// Find the key \p key
454 The function searches the item with key equal to \p key and calls the functor \p f for item found.
455 The interface of \p Func functor is:
458 void operator()( key_type const& key, mapped_type& item );
461 where \p item is the item found.
462 The functor is called under node-level lock.
464 The function applies RCU lock internally.
466 The function returns \p true if \p key is found, \p false otherwise.
468 template <typename K, typename Func>
469 bool find( K const& key, Func f )
471 return do_find( key, key_comparator(),
472 [&f]( node_type * pNode ) -> bool {
473 assert( pNode != nullptr );
474 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
// NOTE(review): the `if ( pVal )` guard preceding this call is not visible
// in this chunk — confirm pVal is null-checked before dereferencing.
476 f( pNode->m_key, *pVal );
484 /// Finds the key \p val using \p pred predicate for searching
486 The function is an analog of \p find(K const&, Func)
487 but \p pred is used for key comparing.
488 \p Less functor has the interface like \p std::less.
489 \p Less must imply the same element order as the comparator used for building the map.
491 template <typename K, typename Less, typename Func>
492 bool find_with( K const& key, Less pred, Func f )
495 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
496 [&f]( node_type * pNode ) -> bool {
497 assert( pNode != nullptr );
498 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
500 f( pNode->m_key, *pVal );
508 /// Find the key \p key
510 The function searches the item with key equal to \p key
511 and returns \p true if it is found, and \p false otherwise.
513 The function applies RCU lock internally.
515 template <typename K>
516 bool find( K const& key )
// Presence check only: the functor ignores the node and reports success.
518 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
521 /// Finds the key \p val using \p pred predicate for searching
523 The function is an analog of \p find(K const&)
524 but \p pred is used for key comparing.
525 \p Less functor has the interface like \p std::less.
526 \p Less must imply the same element order as the comparator used for building the map.
528 template <typename K, typename Less>
529 bool find_with( K const& key, Less pred )
532 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
535 /// Clears the tree (thread safe, not atomic)
537 The function unlinks all items from the tree.
538 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
542 assert( set.empty() );
544 the assertion could be raised.
546 For each node the \ref disposer will be called after unlinking.
548 RCU \p synchronize method can be called. RCU should not be locked.
// clear() drains the tree one leftmost item at a time; each extract_min()
// is an independent RCU-safe removal.
552 while ( extract_min() );
555 /// Clears the tree (not thread safe)
557 This function is not thread safe and may be called only when no other thread deals with the tree.
558 The function is used in the tree destructor.
565 /// Checks if the map is empty
// Empty iff the sentinel root has no right child (the whole tree hangs there).
568 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
571 /// Returns item count in the map
573 Only leaf nodes containing user data are counted.
575 The value returned depends on item counter type provided by \p Traits template parameter.
576 If it is \p atomicity::empty_item_counter this function always returns 0.
578 The function is not suitable for checking the tree emptiness, use \p empty()
579 member function for this purpose.
583 return m_ItemCounter;
586 /// Returns const reference to internal statistics
587 stat const& statistics() const
592 /// Checks internal consistency (not atomic, not thread-safe)
594 The debugging function to check internal consistency of the tree.
596 bool check_consistency() const
// Top-level find dispatcher: starts the descent at the sentinel root with
// direction 1 (right child) and version 0. try_find() retries internally,
// so find_result::retry must never escape to this level.
604 template <typename Q, typename Compare, typename Func>
605 bool do_find( Q& key, Compare cmp, Func f ) const
610 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
612 assert( result != find_result::retry );
613 return result == find_result::found;
// Top-level insert/update dispatcher. The deadlock check verifies RCU is not
// already locked by this thread; removed_list collects any unlinked nodes and
// retires them to RCU when it goes out of scope.
616 template <typename K, typename Compare, typename Func>
617 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
619 check_deadlock_policy::check();
621 rcu_disposer removed_list;
624 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
// Top-level removal dispatcher; see do_update for the RCU/disposer protocol.
628 template <typename K, typename Compare, typename Func>
629 bool do_remove( K const& key, Compare cmp, Func func )
631 // Func must return true if the value was disposed
632 // or false if the value was extracted
634 check_deadlock_policy::check();
636 rcu_disposer removed_list;
639 return try_remove_root( key, cmp, func, removed_list );
// Extract-min/max driver: nDir selects the descent direction (leftmost or
// rightmost). Retries from the root while a concurrent shrink invalidates
// the snapshot of the root's right child.
643 template <typename Func>
644 void do_extract_minmax( int nDir, Func func )
646 check_deadlock_policy::check();
648 rcu_disposer removed_list;
654 // get right child of root
655 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
657 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// A shrinking child means a rotation is in flight: wait it out and retry.
658 if ( nChildVersion & node_type::shrinking ) {
659 m_stat.onRemoveRootWaitShrinking();
660 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
661 result = update_flags::retry;
// Re-read to make sure the child snapshot is still current before descending.
663 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
664 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
667 } while ( result == update_flags::retry );
// Recursive hand-over-hand lookup (Bronson et al. algorithm): each descent
// step validates pNode's version after reading the child link; a version
// change means a rotation happened under us and the caller must retry.
675 template <typename Q, typename Compare, typename Func>
676 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
678 assert( gc::is_locked() );
682 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// Version changed since the caller read it -> our child read may be stale.
684 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
685 m_stat.onFindRetry();
686 return find_result::retry;
689 m_stat.onFindFailed();
690 return find_result::not_found;
693 int nCmp = cmp( key, pChild->m_key );
// Key matched: re-check valued() under the node lock before reporting found,
// since a concurrent remove may have emptied the node.
695 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
697 node_scoped_lock l( m_Monitor, *pChild );
698 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
700 m_stat.onFindSuccess();
701 return find_result::found;
706 m_stat.onFindFailed();
707 return find_result::not_found;
// Descend: wait out an in-flight shrink, then revalidate pNode's version
// before recursing into the child.
710 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
711 if ( nChildVersion & node_type::shrinking ) {
712 m_stat.onFindWaitShrinking();
713 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
715 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
716 m_stat.onFindRetry();
717 return find_result::retry;
720 else if ( nChildVersion != node_type::unlinked ) {
722 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
723 m_stat.onFindRetry();
724 return find_result::retry;
727 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
728 if ( found != find_result::retry )
734 template <typename K, typename Compare, typename Func>
735 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
737 assert( gc::is_locked() );
741 // get right child of root
742 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
744 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
745 if ( nChildVersion & node_type::shrinking ) {
746 m_stat.onUpdateRootWaitShrinking();
747 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
748 result = update_flags::retry;
750 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
751 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
756 if ( nFlags & update_flags::allow_insert ) {
757 // insert into tree as right child of the root
759 node_scoped_lock l( m_Monitor, *m_pRoot );
760 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
761 result = result == update_flags::retry;
765 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
766 mapped_type pVal = funcUpdate( pNew );
767 assert( pVal != nullptr );
768 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
770 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
771 m_pRoot->height( 2, memory_model::memory_order_relaxed );
775 m_stat.onInsertSuccess();
776 return update_flags::result_inserted;
779 return update_flags::failed;
781 } while ( result == update_flags::retry );
// Root-level removal driver: mirrors try_update_root — snapshot the root's
// right child, wait out shrinks, descend via try_remove(), retry on
// update_flags::retry.
785 template <typename K, typename Compare, typename Func>
786 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
788 assert( gc::is_locked() );
792 // get right child of root
793 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
795 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
796 if ( nChildVersion & node_type::shrinking ) {
797 m_stat.onRemoveRootWaitShrinking();
798 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
799 result = update_flags::retry;
801 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
802 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
807 } while ( result == update_flags::retry );
809 return result == update_flags::result_removed;
// Recursive insert/update step. On key match, updates the node in place
// (if allowed); otherwise descends toward the key, using the version
// protocol to detect concurrent rotations and retry locally.
812 template <typename K, typename Compare, typename Func>
813 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
815 assert( gc::is_locked() );
816 assert( nVersion != node_type::unlinked );
817 CDS_UNUSED( pParent );
819 int nCmp = cmp( key, pNode->m_key );
// Exact key match: update in place if allowed, else report failure.
821 if ( nFlags & update_flags::allow_update ) {
822 return try_update_node( funcUpdate, pNode );
824 return update_flags::failed;
// Descend in the comparison direction; revalidate pNode's version after
// reading the child link (a change means our read may be stale).
829 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
830 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
831 m_stat.onUpdateRetry();
832 return update_flags::retry;
// No child in the descent direction: attach a new node here (insert only).
835 if ( pChild == nullptr ) {
837 if ( nFlags & update_flags::allow_insert )
838 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
840 result = update_flags::failed;
844 result = update_flags::retry;
845 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
846 if ( nChildVersion & node_type::shrinking ) {
847 m_stat.onUpdateWaitShrinking();
848 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
851 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
852 // this second read is important, because it is protected by nChildVersion
854 // validate the read that our caller took to get to node
855 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
856 m_stat.onUpdateRetry();
857 return update_flags::retry;
860 // At this point we know that the traversal our parent took to get to node is still valid.
861 // The recursive implementation will validate the traversal from node to
862 // child, so just prior to the node nVersion validation both traversals were definitely okay.
863 // This means that we are no longer vulnerable to node shrinks, and we don't need
864 // to validate node version any more.
865 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
868 } while ( result == update_flags::retry );
// Recursive removal step: same version-validation protocol as try_update.
// On key match, delegates to try_remove_node(); otherwise descends.
872 template <typename K, typename Compare, typename Func>
873 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
875 assert( gc::is_locked() );
876 assert( nVersion != node_type::unlinked );
878 int nCmp = cmp( key, pNode->m_key );
880 return try_remove_node( pParent, pNode, nVersion, func, disp );
884 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
885 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
886 m_stat.onRemoveRetry();
887 return update_flags::retry;
// No child in the descent direction: the key is not present.
890 if ( pChild == nullptr ) {
891 return update_flags::failed;
895 result = update_flags::retry;
896 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
897 if ( nChildVersion & node_type::shrinking ) {
898 m_stat.onRemoveWaitShrinking();
899 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
902 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
903 // this second read is important, because it is protected by nChildVersion
905 // validate the read that our caller took to get to node
906 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
907 m_stat.onRemoveRetry();
908 return update_flags::retry;
911 // At this point we know that the traversal our parent took to get to node is still valid.
912 // The recursive implementation will validate the traversal from node to
913 // child, so just prior to the node nVersion validation both traversals were definitely okay.
914 // This means that we are no longer vulnerable to node shrinks, and we don't need
915 // to validate node version any more.
916 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
919 } while ( result == update_flags::retry );
// Recursive extract-min/max step: walks always in direction nDir until the
// descent hits a node with no child that way, then removes that node.
// Uses the same version-validation/retry protocol as try_remove.
923 template <typename Func>
924 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
926 assert( gc::is_locked() );
927 assert( nVersion != node_type::unlinked );
931 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
932 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
933 m_stat.onRemoveRetry();
934 return update_flags::retry;
// Reached the extremal node: remove it.
937 if ( pChild == nullptr ) {
939 return try_remove_node( pParent, pNode, nVersion, func, disp );
942 result = update_flags::retry;
943 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
944 if ( nChildVersion & node_type::shrinking ) {
945 m_stat.onRemoveWaitShrinking();
946 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
949 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
950 // this second read is important, because it is protected by nChildVersion
952 // validate the read that our caller took to get to node
953 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
954 m_stat.onRemoveRetry();
955 return update_flags::retry;
958 // At this point we know that the traversal our parent took to get to node is still valid.
959 // The recursive implementation will validate the traversal from node to
960 // child, so just prior to the node nVersion validation both traversals were definitely okay.
961 // This means that we are no longer vulnerable to node shrinks, and we don't need
962 // to validate node version any more.
963 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
966 } while ( result == update_flags::retry );
// Attaches a new node as pNode's nDir child. With relaxed insertion
// (c_bRelaxedInsert) the node is allocated before taking the lock and must
// be discarded if the precondition fails under the lock; otherwise it is
// allocated inside the critical section.
970 template <typename K, typename Func>
971 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Stores the user value produced by funcUpdate into the new node.
975 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
976 mapped_type pVal = funcUpdate( pNew );
977 assert( pVal != nullptr );
978 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Optimistic pre-allocation path: cheap pre-checks before locking.
981 if ( c_bRelaxedInsert ) {
982 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
983 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
985 m_stat.onInsertRetry();
986 return update_flags::retry;
// NOTE(review): pNew is declared on a line not visible in this chunk.
989 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
992 node_type * pDamaged;
994 assert( pNode != nullptr );
995 node_scoped_lock l( m_Monitor, *pNode );
// Re-check the insertion precondition under the lock.
997 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
998 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1000 if ( c_bRelaxedInsert ) {
1002 m_stat.onRelaxedInsertFailed();
1005 m_stat.onInsertRetry();
1006 return update_flags::retry;
1009 if ( !c_bRelaxedInsert )
1010 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Link the new leaf and record the node whose height may now be wrong.
1012 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1013 pDamaged = fix_height_locked( pNode );
1017 m_stat.onInsertSuccess();
1019 fix_height_and_rebalance( pDamaged, disp );
1021 return update_flags::result_inserted;
// Replaces the value of an existing node under its node-level lock.
// If the node was unlinked concurrently, reports retry so the caller
// restarts the descent. The old value is disposed after the swap.
1024 template <typename Func>
1025 int try_update_node( Func funcUpdate, node_type * pNode )
1028 assert( pNode != nullptr );
1030 node_scoped_lock l( m_Monitor, *pNode );
1032 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1033 m_stat.onUpdateUnlinked();
1034 return update_flags::retry;
1037 pOld = pNode->value( memory_model::memory_order_relaxed );
1038 mapped_type pVal = funcUpdate( pNode );
1042 assert( pVal != nullptr );
1043 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// The previous value (if any) is handed to the disposer.
1049 m_stat.onDisposeValue();
1052 m_stat.onUpdateSuccess();
1053 return update_flags::result_updated;
// Removes pNode's value. Two cases:
//  - routing case (node has <2 children): lock parent then node, verify the
//    version and unlink the node structurally via try_unlink_locked();
//  - interior case (2 children): keep the node as a routing node and only
//    null out its value under the node lock.
// func(pOld) returns true if it disposed the value, false if extracted.
1056 template <typename Func>
1057 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1059 assert( pParent != nullptr );
1060 assert( pNode != nullptr );
// A node without a value is a pure routing node - nothing to remove.
1062 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1063 return update_flags::failed;
1065 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1066 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1068 node_type * pDamaged;
// Lock order: parent first, then node (consistent order avoids deadlock).
1071 node_scoped_lock lp( m_Monitor, *pParent );
1072 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1073 return update_flags::retry;
1076 node_scoped_lock ln( m_Monitor, *pNode );
1077 pOld = pNode->value( memory_model::memory_order_relaxed );
1078 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1080 && try_unlink_locked( pParent, pNode, disp )))
1082 return update_flags::retry;
1085 pDamaged = fix_height_locked( pParent );
1089 if ( func( pOld ) ) // calls pOld disposer inside
1090 m_stat.onDisposeValue();
1092 m_stat.onExtractValue();
1094 fix_height_and_rebalance( pDamaged, disp );
1095 return update_flags::result_removed;
// Interior node: clear the value only, structure stays intact.
1098 int result = update_flags::retry;
1101 node_scoped_lock ln( m_Monitor, *pNode );
1102 pOld = pNode->value( atomics::memory_order_relaxed );
1103 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1104 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1105 result = update_flags::result_removed;
1109 if ( result == update_flags::result_removed ) {
1111 if ( func( pOld )) // calls pOld disposer inside
1112 m_stat.onDisposeValue();
1114 m_stat.onExtractValue();
// Splices pNode out of the tree: replaces pNode in pParent's child slot with
// pNode's single child (or nullptr). Both pParent and pNode must be locked
// by the caller. Returns false when splicing is impossible (stale links or
// two children); the removed node goes to the RCU disposer.
1121 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1123 // pParent and pNode must be locked
1124 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1126 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1127 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1128 if ( pNode != pParentLeft && pNode != pParentRight ) {
1129 // node is no longer a child of parent
1133 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1134 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1136 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1137 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1138 if ( pLeft != nullptr && pRight != nullptr ) {
1139 // splicing is no longer possible
// Promote the surviving subtree into pNode's slot in the parent.
1142 node_type * pSplice = pLeft ? pLeft : pRight;
1144 if ( pParentLeft == pNode )
1145 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1147 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// Release-store publishes the re-parenting to concurrent readers.
1150 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1152 // Mark the node as unlinked
1153 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1155 // The value will be disposed by calling function
1156 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1158 disp.dispose( pNode );
1159 m_stat.onDisposeNode();
1166 private: // rotations
1168 int estimate_node_condition( node_type * pNode )
// Lock-free estimate of what maintenance pNode needs.
// Returns one of: unlink_required (valueless routing node with <= 1 child),
// rebalance_required (AVL balance violated), the corrected height when the
// cached height is merely stale, or nothing_required.
// The loads are relaxed snapshots; callers re-validate under locks before acting.
1170 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1171 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// A node holding no value with at most one child is a pure routing node
// and should be spliced out of the tree.
1173 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1174 return unlink_required;
1176 int h = pNode->height( memory_model::memory_order_relaxed );
1177 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1178 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1180 int hNew = 1 + std::max( hL, hR );
1181 int nBalance = hL - hR;
// AVL invariant: subtree heights may differ by at most one.
1183 if ( nBalance < -1 || nBalance > 1 )
1184 return rebalance_required;
// Balanced; if the cached height went stale, report the value to store.
1186 return h != hNew ? hNew : nothing_required;
1189 node_type * fix_height( node_type * pNode )
1191 assert( pNode != nullptr );
1192 node_scoped_lock l( m_Monitor, *pNode );
1193 return fix_height_locked( pNode );
1196 node_type * fix_height_locked( node_type * pNode )
// Repairs pNode's cached height while its lock is held, dispatching on
// estimate_node_condition(): the rotation/unlink cases and the "nothing to do"
// case are handled by the (elided) switch arms; otherwise the recomputed height
// h is stored and the parent is returned as the next potentially damaged node.
1198 // pNode must be locked!!!
1199 int h = estimate_node_condition( pNode );
1201 case rebalance_required:
1202 case unlink_required:
// NOTE(review): these cases presumably return pNode so the caller performs the
// rotation/unlink itself — confirm against the full switch body.
1204 case nothing_required:
// Default case: h holds the corrected height; store it and propagate upward.
1207 pNode->height( h, memory_model::memory_order_relaxed );
1208 return parent( pNode, memory_model::memory_order_relaxed );
1212 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
// Walks from pNode upward, repairing height/balance damage left behind by an
// update or a previous rotation. The loop ends when the current node has no
// parent, when no repair is needed, or when the node was unlinked concurrently.
1214 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1215 int nCond = estimate_node_condition( pNode );
1216 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// Only the cached height is stale: fix it and continue with the returned node.
1219 if ( nCond != unlink_required && nCond != rebalance_required )
1220 pNode = fix_height( pNode );
// Rotation or unlink is required: the parent is locked before the node.
1222 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1223 assert( pParent != nullptr );
1225 node_scoped_lock lp( m_Monitor, *pParent );
// Re-validate under the parent's lock: a concurrent rotation may have
// unlinked pParent or re-parented pNode since the relaxed read above.
1226 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1227 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1229 node_scoped_lock ln( m_Monitor, *pNode );
// rebalance_locked() returns the next damaged node (or nullptr to stop).
1230 pNode = rebalance_locked( pParent, pNode, disp );
1237 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
// Repairs one node: splices out routing nodes, rotates when the AVL balance is
// violated, or refreshes a stale cached height.
1239 // pParent and pNode should be locked.
1240 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1241 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1242 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1243 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1245 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1246 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Valueless routing node with at most one child: splice it out instead of rotating.
1248 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1249 if ( try_unlink_locked( pParent, pNode, disp ))
// Unlink succeeded; the parent's height may now be stale.
1250 return fix_height_locked( pParent );
1252 // retry needed for pNode
1257 int h = pNode->height( memory_model::memory_order_relaxed );
1258 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1259 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1260 int hNew = 1 + std::max( hL, hR );
1261 int balance = hL - hR;
// Left-heavy beyond the AVL bound: rotate toward the right; right-heavy: toward the left.
1264 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1265 else if ( balance < -1 )
1266 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
// Balanced; refresh the cached height if it drifted.
1267 else if ( hNew != h ) {
1268 pNode->height( hNew, memory_model::memory_order_relaxed );
1270 // pParent is already locked
1271 return fix_height_locked( pParent );
1277 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
// pNode's left subtree is too tall. Repairs with a single right rotation or,
// when pLeft is itself right-heavy, with a right-over-left double rotation.
// Returns the next node needing repair; returning pNode means "retry here"
// because a validation check failed under the newly acquired lock.
1279 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1280 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1281 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1283 // pParent and pNode is locked yet
1284 // pNode->pLeft is too large, we will rotate-right.
1285 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1288 assert( pLeft != nullptr );
1289 node_scoped_lock l( m_Monitor, *pLeft );
// Validate the caller's snapshot now that pLeft is locked.
1290 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1291 return pNode; // retry for pNode
1293 int hL = pLeft->height( memory_model::memory_order_relaxed );
1295 return pNode; // retry
1297 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1298 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1299 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1300 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Left child not right-heavy: a single right rotation suffices.
1304 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1307 assert( pLRight != nullptr );
// Double-rotation candidate: additionally lock pLeft's right child and re-check it.
1309 node_scoped_lock lr( m_Monitor, *pLRight );
1310 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1311 return pNode; // retry
// Refresh the height now that pLRight cannot change under us.
1313 hLR = pLRight->height( memory_model::memory_order_relaxed );
1315 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1317 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1318 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1319 int balance = hLL - hLRL;
// The double rotation is taken only if it cannot leave pLeft damaged:
// pLeft stays balanced and does not become an empty routing node.
1320 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1321 // nParent.child.left won't be damaged after a double rotation
1322 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1326 // focus on pLeft, if necessary pNode will be balanced later
1327 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1332 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
// Mirror of rebalance_to_right_locked(): pNode's right subtree is too tall.
// Repairs with a single left rotation or, when pRight is left-heavy, with a
// left-over-right double rotation. Returning pNode means "retry here" after
// a failed validation under the newly acquired lock.
1334 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1335 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1336 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1338 // pParent and pNode is locked yet
1340 assert( pRight != nullptr );
1341 node_scoped_lock l( m_Monitor, *pRight );
// Validate the caller's snapshot now that pRight is locked.
1342 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1343 return pNode; // retry for pNode
1345 int hR = pRight->height( memory_model::memory_order_relaxed );
// The imbalance evaporated (concurrent repair): nothing to rotate here.
1346 if ( hL - hR >= -1 )
1347 return pNode; // retry
1349 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1350 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1351 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1352 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Right child not left-heavy: a single left rotation suffices.
1354 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1357 assert( pRLeft != nullptr );
// Double-rotation candidate: additionally lock pRight's left child and re-check it.
1358 node_scoped_lock lrl( m_Monitor, *pRLeft );
1359 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1360 return pNode; // retry
// Refresh the height now that pRLeft cannot change under us.
1362 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1364 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1366 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1367 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1368 int balance = hRR - hRLR;
// Double rotation only if it cannot leave pRight damaged afterwards.
1369 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1370 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise repair pRight first; pNode will be revisited later if needed.
1372 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1376 static void begin_change( node_type * pNode, version_type version )
1378 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1380 static void end_change( node_type * pNode, version_type version )
1382 // Clear shrinking and unlinked flags and increment version
1383 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1386 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
// Single right rotation: pLeft replaces pNode under pParent, pNode becomes
// pLeft's right child, and pLeft's former right subtree (pLRight) becomes
// pNode's left subtree. The caller holds the locks on pParent, pNode and pLeft.
// Returns the next damaged node to repair (or delegates to fix_height_locked).
1388 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1389 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Mark pNode as shrinking for the duration of the link surgery below.
1391 begin_change( pNode, nodeVersion );
1393 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1394 if ( pLRight != nullptr )
1395 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1397 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1398 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// Hook pLeft into whichever slot of pParent previously held pNode.
1400 if ( pParentLeft == pNode )
1401 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1403 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1404 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1406 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1408 // fix up heights links
1409 int hNode = 1 + std::max( hLR, hR );
1410 pNode->height( hNode, memory_model::memory_order_relaxed );
1411 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
// End the shrinking window and advance pNode's version.
1413 end_change( pNode, nodeVersion );
1414 m_stat.onRotateRight();
1416 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1417 // parent.child). pNode is the deepest. Perform as many fixes as we can
1418 // with the locks we've got.
1420 // We've already fixed the height for pNode, but it might still be outside
1421 // our allowable balance range. In that case a simple fix_height_locked()
1423 int nodeBalance = hLR - hR;
1424 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1425 // we need another rotation at pNode
1429 // we've fixed balance and height damage for pNode, now handle
1430 // extra-routing node damage
1431 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1432 // we need to remove pNode and then repair
1436 // we've already fixed the height at pLeft, do we need a rotation here?
1437 int leftBalance = hLL - hNode;
1438 if ( leftBalance < -1 || leftBalance > 1 )
1441 // pLeft might also have routing node damage (if pLeft.left was null)
1442 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1445 // try to fix the parent height while we've still got the lock
1446 return fix_height_locked( pParent );
1449 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
// Mirror of rotate_right_locked(): single left rotation. pRight replaces pNode
// under pParent, pNode becomes pRight's left child, and pRight's former left
// subtree (pRLeft) becomes pNode's right subtree. Caller holds the locks on
// pParent, pNode and pRight.
1451 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1452 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Mark pNode as shrinking for the duration of the link surgery below.
1454 begin_change( pNode, nodeVersion );
1456 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1457 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1458 if ( pRLeft != nullptr )
1459 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1461 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1462 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// Hook pRight into whichever slot of pParent previously held pNode.
1464 if ( pParentLeft == pNode )
1465 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1467 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1468 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1470 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: pNode first, then its new parent pRight.
1473 int hNode = 1 + std::max( hL, hRL );
1474 pNode->height( hNode, memory_model::memory_order_relaxed );
1475 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
// End the shrinking window and advance pNode's version.
1477 end_change( pNode, nodeVersion );
1478 m_stat.onRotateLeft();
// Post-rotation damage checks (mirrors the rotate-right case): pNode's balance,
// pNode as a potential routing node, pRight's balance, pRight as a routing node.
1480 int nodeBalance = hRL - hL;
1481 if ( nodeBalance < -1 || nodeBalance > 1 )
1484 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1487 int rightBalance = hRR - hNode;
1488 if ( rightBalance < -1 || rightBalance > 1 )
1491 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
// Finally try to fix the parent height while its lock is still held.
1494 return fix_height_locked( pParent );
1497 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
// Double rotation (left-rotate pLeft, then right-rotate pNode), performed as a
// single link-surgery step: pLRight becomes the new subtree root under pParent,
// with pLeft as its left child and pNode as its right child. Caller holds the
// locks on pParent, pNode, pLeft and pLRight.
1499 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1500 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1502 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1503 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1504 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1505 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
// Both pNode and pLeft change shape, so both are marked as shrinking.
1507 begin_change( pNode, nodeVersion );
1508 begin_change( pLeft, leftVersion );
1510 // fix up pNode links, careful about the order!
1511 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1512 if ( pLRR != nullptr )
1513 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1515 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1516 if ( pLRL != nullptr )
1517 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight takes both pLeft (left) and pNode (right) as children.
1519 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1520 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1521 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1522 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// Hook pLRight into whichever slot of pParent previously held pNode.
1525 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1527 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1528 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1530 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: children first, then the new subtree root.
1533 int hNode = 1 + std::max( hLRR, hR );
1534 pNode->height( hNode, memory_model::memory_order_relaxed );
1535 int hLeft = 1 + std::max( hLL, hLRL );
1536 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1537 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
// Close both shrinking windows and advance the versions.
1539 end_change( pNode, nodeVersion );
1540 end_change( pLeft, leftVersion );
1541 m_stat.onRotateRightOverLeft();
1543 // caller should have performed only a single rotation if pLeft was going
1544 // to end up damaged
1545 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1546 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1548 // We have damaged pParent, pLR (now parent.child), and pNode (now
1549 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1550 // can with the locks we've got.
1552 // We've already fixed the height for pNode, but it might still be outside
1553 // our allowable balance range. In that case a simple fix_height_locked()
1555 int nodeBalance = hLRR - hR;
1556 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1557 // we need another rotation at pNode
1561 // pNode might also be damaged by being an unnecessary routing node
1562 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1563 // repair involves splicing out pNode and maybe more rotations
1567 // we've already fixed the height at pLRight, do we need a rotation here?
1568 int balanceLR = hLeft - hNode;
1569 if ( balanceLR < -1 || balanceLR > 1 )
1572 // try to fix the parent height while we've still got the lock
1573 return fix_height_locked( pParent );
1576 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
// Mirror of rotate_right_over_left_locked(): double rotation (right-rotate
// pRight, then left-rotate pNode) done as one link-surgery step. pRLeft becomes
// the new subtree root under pParent, with pNode as its left child and pRight
// as its right child. Caller holds the locks on pParent, pNode, pRight and pRLeft.
1578 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1579 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1581 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1582 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1583 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1584 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
// Both pNode and pRight change shape, so both are marked as shrinking.
1586 begin_change( pNode, nodeVersion );
1587 begin_change( pRight, rightVersion );
1589 // fix up pNode links, careful about the order!
1590 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1591 if ( pRLL != nullptr )
1592 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1594 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1595 if ( pRLR != nullptr )
1596 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft takes both pNode (left) and pRight (right) as children.
1598 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1599 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1600 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1601 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// Hook pRLeft into whichever slot of pParent previously held pNode.
1604 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1606 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1607 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1609 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: children first, then the new subtree root.
1612 int hNode = 1 + std::max( hL, hRLL );
1613 pNode->height( hNode, memory_model::memory_order_relaxed );
1614 int hRight = 1 + std::max( hRLR, hRR );
1615 pRight->height( hRight, memory_model::memory_order_relaxed );
1616 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
// Close both shrinking windows and advance the versions.
1618 end_change( pNode, nodeVersion );
1619 end_change( pRight, rightVersion );
1620 m_stat.onRotateLeftOverRight();
// Caller must have chosen a single rotation if pRight would end up damaged.
1622 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage checks (mirror of the right-over-left case):
// pNode's balance, pNode as a routing node, then the new root's balance.
1624 int nodeBalance = hRLL - hL;
1625 if ( nodeBalance < -1 || nodeBalance > 1 )
1627 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1630 int balRL = hRight - hNode;
1631 if ( balRL < -1 || balRL > 1 )
// Finally try to fix the parent height while its lock is still held.
1634 return fix_height_locked( pParent );
1639 }} // namespace cds::container
1641 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H