3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
18 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
19 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
20 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
21 the disposer functor provided by \p Traits template parameter.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
// Specialization for mapped type T* : values are user-allocated, the map stores only pointers.
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
// Real (non-doxygen) definition: comparator synthesized from Traits::compare / Traits::less.
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
67 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
68 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
// Fixed: the guard was misspelled "CDSDOXYGEN_INVOKED" (missing underscore) — compare the
// correctly spelled CDS_DOXYGEN_INVOKED guards elsewhere in this header. With the typo the
// simplified doxygen-only typedef could never be selected even during documentation builds.
70 # ifdef CDS_DOXYGEN_INVOKED
71 /// Returned pointer to \p mapped_type of extracted node
72 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
// Real definition: exempt_ptr over the pointee type of mapped_type (T* -> T).
74 typedef cds::urcu::exempt_ptr< gc,
75 typename std::remove_pointer<mapped_type>::type,
76 typename std::remove_pointer<mapped_type>::type,
82 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
// Internal (protected) types: tree node, its version counter, node allocator and
// the RCU deadlock-check policy.
86 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
87 typedef typename node_type::version_type version_type;
89 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
90 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// true iff the stored value type derives from bronson_avltree::value, i.e. values
// themselves can be linked into a retired list for deferred RCU disposal.
92 static CDS_CONSTEXPR bool const c_bRetiringValue = std::is_base_of< bronson_avltree::value, typename std::remove_pointer<mapped_type>::type>::value;
94 enum class find_result
// update_flags: results are encoded with the same bits as the corresponding "allow" flags.
111 result_inserted = allow_insert,
112 result_updated = allow_update,
// Rebalance decision codes (negative values, distinct from update_flags).
119 nothing_required = -3,
120 rebalance_required = -2,
129 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// Static node helpers: allocation, deallocation and relaxed/acquire accessors
// for a node's child and parent links.
134 template <typename K>
135 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
137 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
140 static void free_node( node_type * pNode )
142 // Free node without disposer
143 cxx_allocator().Delete( pNode );
146 static void free_value( mapped_type pVal )
// child(): loads the left/right child selected by nDir with the given memory order.
151 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
153 return pNode->child( nDir ).load( order );
156 static node_type * parent( node_type * pNode, atomics::memory_order order )
158 return pNode->m_pParent.load( order );
// rcu_value_disposer<true/false>: collects retired values while locks are held and
// hands them to RCU for deferred disposal afterwards. The <false> specialization is a
// no-op used when values are not derived from bronson_avltree::value.
162 template <bool Enabled>
163 class rcu_value_disposer;
166 class rcu_value_disposer<true>
168 bronson_avltree::value * m_pValueRetiredList; ///< head of retired value list
171 : m_pValueRetiredList(nullptr)
174 ~rcu_value_disposer()
179 void dispose( mapped_type pVal )
// Push the value onto the intrusive singly-linked retired list (LIFO).
182 static_cast< bronson_avltree::value *>(pVal)->m_pNextRetired = m_pValueRetiredList;
183 m_pValueRetiredList = static_cast< bronson_avltree::value *>(pVal);
187 struct internal_disposer
189 void operator()( bronson_avltree::value * p ) const
191 free_value( static_cast<mapped_type>( p ));
197 // TODO: use RCU::batch_retire
198 for ( auto p = m_pValueRetiredList; p; ) {
199 auto pNext = p->m_pNextRetired;
200 gc::template retire_ptr<internal_disposer>( p );
207 class rcu_value_disposer<false>
210 void dispose( mapped_type pVal )
// rcu_disposer: accumulates unlinked nodes (and retired values) during an operation;
// everything is passed to gc::retire_ptr when the disposer is destroyed outside the RCU lock.
219 node_type * m_pRetiredList; ///< head of retired node list
220 rcu_value_disposer< c_bRetiringValue > m_RetiredValueList;
224 : m_pRetiredList( nullptr )
232 void dispose( node_type * pNode )
// Only nodes whose value has already been removed may be retired.
234 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
235 pNode->m_pNextRemoved = m_pRetiredList;
236 m_pRetiredList = pNode;
239 void dispose_value( mapped_type pVal )
241 m_RetiredValueList.dispose( pVal );
245 struct internal_disposer
247 void operator()( node_type * p ) const
// Retiring must happen outside the RCU read-side critical section.
255 assert( !gc::is_locked() );
257 // TODO: use RCU::batch_retire
260 for ( node_type * p = m_pRetiredList; p; ) {
261 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
262 // Value already disposed
263 gc::template retire_ptr<internal_disposer>( p );
// Data members: sentinel root node, item counter and the node-locking monitor.
273 typename node_type::base_class m_Root;
275 item_counter m_ItemCounter;
276 mutable sync_monitor m_Monitor;
281 /// Creates empty map
// m_pRoot points at the sentinel m_Root; the real tree hangs off its right child.
283 : m_pRoot( static_cast<node_type *>( &m_Root ))
294 The \p key_type should be constructible from a value of type \p K.
296 RCU \p synchronize() can be called. RCU should not be locked.
298 Returns \p true if inserting successful, \p false otherwise.
300 template <typename K>
301 bool insert( K const& key, mapped_type pVal )
// Insert-only: the functor just supplies pVal for a freshly created node.
303 return do_update(key, key_comparator(),
304 [pVal]( node_type * pNode ) -> mapped_type
306 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
309 update_flags::allow_insert
310 ) == update_flags::result_inserted;
313 /// Updates the value for \p key
315 The operation performs inserting or updating the value for \p key with lock-free manner.
316 If \p bInsert is \p false, only updating of existing node is possible.
318 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
319 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
320 constructible from type \p K.
321 Otherwise, the value is changed to \p pVal.
323 RCU \p synchronize() method can be called. RCU should not be locked.
325 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
326 \p second is \p true if new node has been added or \p false if the node with \p key
327 already is in the tree.
329 template <typename K, typename Func>
330 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
332 int result = do_update( key, key_comparator(),
333 [pVal]( node_type * ) -> mapped_type
337 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
// result_inserted bit distinguishes "new node added" from "existing node updated".
339 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
342 /// Delete \p key from the map
344 RCU \p synchronize() method can be called. RCU should not be locked.
346 Return \p true if \p key is found and deleted, \p false otherwise
348 template <typename K>
349 bool erase( K const& key )
// The lambda returns true => the value is retired (disposed) by the rcu_disposer.
354 []( mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
358 /// Deletes the item from the map using \p pred predicate for searching
360 The function is an analog of \p erase(K const&)
361 but \p pred is used for key comparing.
362 \p Less functor has the interface like \p std::less.
363 \p Less must imply the same element order as the comparator used for building the map.
365 template <typename K, typename Less>
366 bool erase_with( K const& key, Less pred )
368 cds::opt::details::make_comparator_from_less<Less>(),
372 []( mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
376 /// Delete \p key from the map
378 The function searches an item with key \p key, calls \p f functor
379 and deletes the item. If \p key is not found, the functor is not called.
381 The functor \p Func interface:
384 void operator()(mapped_type& item) { ... }
388 RCU \p synchronize method can be called. RCU should not be locked.
390 Return \p true if key is found and deleted, \p false otherwise
392 template <typename K, typename Func>
393 bool erase( K const& key, Func f )
// Functor is applied to the value before it is retired.
398 [&f]( mapped_type pVal, rcu_disposer& disp ) -> bool {
401 disp.dispose_value(pVal);
407 /// Deletes the item from the map using \p pred predicate for searching
409 The function is an analog of \p erase(K const&, Func)
410 but \p pred is used for key comparing.
411 \p Less functor has the interface like \p std::less.
412 \p Less must imply the same element order as the comparator used for building the map.
414 template <typename K, typename Less, typename Func>
415 bool erase_with( K const& key, Less pred, Func f )
420 cds::opt::details::make_comparator_from_less<Less>(),
421 [&f]( mapped_type pVal, rcu_disposer& disp ) -> bool {
424 disp.dispose_value(pVal);
430 /// Extracts an item with minimal key from the map
432 Returns \p exempt_ptr to the leftmost item.
433 If the set is empty, returns empty \p exempt_ptr.
435 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
436 It means that the function gets leftmost leaf of the tree and tries to unlink it.
437 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
438 So, the function returns the item with minimum key at the moment of tree traversing.
440 RCU \p synchronize method can be called. RCU should NOT be locked.
441 The function does not free the item.
442 The deallocator will be implicitly invoked when the returned object is destroyed or when
443 its \p release() member function is called.
445 exempt_ptr extract_min()
447 return exempt_ptr(do_extract_min());
450 /// Extracts an item with maximal key from the map
452 Returns \p exempt_ptr pointer to the rightmost item.
453 If the set is empty, returns empty \p exempt_ptr.
455 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
456 It means that the function gets rightmost leaf of the tree and tries to unlink it.
457 During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
458 So, the function returns the item with maximum key at the moment of tree traversing.
460 RCU \p synchronize method can be called. RCU should NOT be locked.
461 The function does not free the item.
462 The deallocator will be implicitly invoked when the returned object is destroyed or when
463 its \p release() is called.
465 exempt_ptr extract_max()
467 return exempt_ptr(do_extract_max());
470 /// Extracts an item from the map
472 The function searches an item with key equal to \p key in the tree,
473 unlinks it, and returns \p exempt_ptr pointer to a value found.
474 If \p key is not found the function returns an empty \p exempt_ptr.
476 RCU \p synchronize method can be called. RCU should NOT be locked.
477 The function does not destroy the value found.
478 The disposer will be implicitly invoked when the returned object is destroyed or when
479 its \p release() member function is called.
481 template <typename Q>
482 exempt_ptr extract( Q const& key )
484 return exempt_ptr(do_extract( key ));
487 /// Extracts an item from the map using \p pred for searching
489 The function is an analog of \p extract(Q const&)
490 but \p pred is used for key compare.
491 \p Less has the interface like \p std::less.
492 \p pred must imply the same element order as the comparator used for building the tree.
494 template <typename Q, typename Less>
495 exempt_ptr extract_with( Q const& key, Less pred )
497 return exempt_ptr(do_extract_with( key, pred ));
500 /// Find the key \p key
502 The function searches the item with key equal to \p key and calls the functor \p f for item found.
503 The interface of \p Func functor is:
506 void operator()( key_type const& key, mapped_type& item );
509 where \p item is the item found.
510 The functor is called under node-level lock.
512 The function applies RCU lock internally.
514 The function returns \p true if \p key is found, \p false otherwise.
516 template <typename K, typename Func>
517 bool find( K const& key, Func f )
519 return do_find( key, key_comparator(),
520 [&f]( node_type * pNode ) -> bool {
521 assert( pNode != nullptr );
// Relaxed load is sufficient: the caller holds the node-level lock here.
522 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
524 f( pNode->m_key, *pVal );
532 /// Finds the key \p val using \p pred predicate for searching
534 The function is an analog of \p find(K const&, Func)
535 but \p pred is used for key comparing.
536 \p Less functor has the interface like \p std::less.
537 \p Less must imply the same element order as the comparator used for building the map.
539 template <typename K, typename Less, typename Func>
540 bool find_with( K const& key, Less pred, Func f )
543 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
544 [&f]( node_type * pNode ) -> bool {
545 assert( pNode != nullptr );
546 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
548 f( pNode->m_key, *pVal );
556 /// Find the key \p key
558 The function searches the item with key equal to \p key
559 and returns \p true if it is found, and \p false otherwise.
561 The function applies RCU lock internally.
563 template <typename K>
564 bool find( K const& key )
566 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
569 /// Finds the key \p val using \p pred predicate for searching
571 The function is an analog of \p find(K const&)
572 but \p pred is used for key comparing.
573 \p Less functor has the interface like \p std::less.
574 \p Less must imply the same element order as the comparator used for building the map.
576 template <typename K, typename Less>
577 bool find_with( K const& key, Less pred )
580 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
583 /// Clears the tree (thread safe, not atomic)
585 The function unlink all items from the tree.
586 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
590 assert( set.empty() );
592 the assertion could be raised.
594 For each node the \ref disposer will be called after unlinking.
596 RCU \p synchronize method can be called. RCU should not be locked.
// Repeatedly extract the leftmost item until the tree is empty.
600 while ( extract_min() );
603 /// Clears the tree (not thread safe)
605 This function is not thread safe and may be called only when no other thread deals with the tree.
606 The function is used in the tree destructor.
610 clear(); // temp solution
614 /// Checks if the map is empty
// The whole tree hangs off the right child of the sentinel root.
617 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
620 /// Returns item count in the map
622 Only leaf nodes containing user data are counted.
624 The value returned depends on item counter type provided by \p Traits template parameter.
625 If it is \p atomicity::empty_item_counter this function always returns 0.
627 The function is not suitable for checking the tree emptiness, use \p empty()
628 member function for this purpose.
632 return m_ItemCounter;
635 /// Returns const reference to internal statistics
636 stat const& statistics() const
641 /// Checks internal consistency (not atomic, not thread-safe)
643 The debugging function to check internal consistency of the tree.
645 bool check_consistency() const
// Internal drivers: do_find / do_update / do_remove / do_extract_* set up deadlock
// checking and the rcu_disposer, then delegate to the recursive try_* routines.
653 template <typename Q, typename Compare, typename Func>
654 bool do_find( Q& key, Compare cmp, Func f ) const
// Search starts from the sentinel root, direction = right (1), version 0.
659 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
661 assert( result != find_result::retry );
662 return result == find_result::found;
665 template <typename K, typename Compare, typename Func>
666 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
668 check_deadlock_policy::check();
// removed_list retires unlinked nodes/values after the RCU lock is released.
670 rcu_disposer removed_list;
673 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
677 template <typename K, typename Compare, typename Func>
678 bool do_remove( K const& key, Compare cmp, Func func )
680 // Func must return true if the value was disposed
681 // or false if the value was extracted
683 check_deadlock_policy::check();
685 rcu_disposer removed_list;
688 return try_remove_root( key, cmp, func, removed_list );
692 mapped_type do_extract_min()
694 mapped_type pExtracted = nullptr;
// Returning false keeps the value alive: it is handed to the caller, not disposed.
697 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
702 mapped_type do_extract_max()
704 mapped_type pExtracted = nullptr;
707 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
712 template <typename Func>
713 void do_extract_minmax( int nDir, Func func )
715 check_deadlock_policy::check();
717 rcu_disposer removed_list;
721 int result = update_flags::failed;
723 // get right child of root
724 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
726 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// A node marked "shrinking" is being rotated; wait for the rotation to complete and retry.
727 if ( nChildVersion & node_type::shrinking ) {
728 m_stat.onRemoveRootWaitShrinking();
729 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
730 result = update_flags::retry;
732 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
733 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
736 } while ( result == update_flags::retry );
740 template <typename Q>
741 mapped_type do_extract( Q const& key )
743 mapped_type pExtracted = nullptr;
747 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
752 template <typename Q, typename Less>
753 mapped_type do_extract_with( Q const& key, Less pred )
756 mapped_type pExtracted = nullptr;
759 cds::opt::details::make_comparator_from_less<Less>(),
760 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Recursive hand-over-hand search with version validation (Bronson et al. algorithm):
// pNode's version (nVersion) is re-checked after reading the child link; a version
// mismatch means pNode was involved in a rotation and the search must be retried.
769 template <typename Q, typename Compare, typename Func>
770 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
772 assert( gc::is_locked() );
776 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
778 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
779 m_stat.onFindRetry();
780 return find_result::retry;
783 m_stat.onFindFailed();
784 return find_result::not_found;
787 int nCmp = cmp( key, pChild->m_key );
// Key matches: re-check the "valued" flag under the node lock before invoking the functor.
789 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
791 node_scoped_lock l( m_Monitor, *pChild );
792 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
794 m_stat.onFindSuccess();
795 return find_result::found;
800 m_stat.onFindFailed();
801 return find_result::not_found;
// Descend: if the child is shrinking, wait and re-validate the parent version.
804 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
805 if ( nChildVersion & node_type::shrinking ) {
806 m_stat.onFindWaitShrinking();
807 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
809 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
810 m_stat.onFindRetry();
811 return find_result::retry;
814 else if ( nChildVersion != node_type::unlinked ) {
816 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
817 m_stat.onFindRetry();
818 return find_result::retry;
821 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
822 if ( found != find_result::retry )
// try_update_root: retry loop over the sentinel root. Either descends into the right
// subtree via try_update(), or — if the tree is empty and insertion is allowed —
// creates the first real node as the right child of the sentinel under the root lock.
828 template <typename K, typename Compare, typename Func>
829 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
831 assert( gc::is_locked() );
835 // get right child of root
836 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
838 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
839 if ( nChildVersion & node_type::shrinking ) {
840 m_stat.onUpdateRootWaitShrinking();
841 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
842 result = update_flags::retry;
844 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
845 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
850 if ( nFlags & update_flags::allow_insert ) {
851 // insert into tree as right child of the root
853 node_scoped_lock l( m_Monitor, *m_pRoot );
854 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
// Fixed: was "result = result == update_flags::retry;", which stored the boolean
// comparison result (0/1) instead of the retry flag itself. A concurrent insert won
// the race for the root's right child, so this path must request a retry — exactly
// as the sibling retry paths above do.
855 result = update_flags::retry;
859 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
860 mapped_type pVal = funcUpdate( pNew );
861 assert( pVal != nullptr );
// Release store publishes the value before the node becomes reachable.
862 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
864 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
865 m_pRoot->height( 2, memory_model::memory_order_relaxed );
869 m_stat.onInsertSuccess();
870 return update_flags::result_inserted;
873 return update_flags::failed;
875 } while ( result == update_flags::retry );
// try_remove_root: same retry-at-root pattern as try_update_root, delegating to try_remove().
879 template <typename K, typename Compare, typename Func>
880 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
882 assert( gc::is_locked() );
886 // get right child of root
887 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
889 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
890 if ( nChildVersion & node_type::shrinking ) {
891 m_stat.onRemoveRootWaitShrinking();
892 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
893 result = update_flags::retry;
895 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
896 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
901 } while ( result == update_flags::retry );
903 return result == update_flags::result_removed;
// try_update: recursive descent with version validation; on key match delegates to
// try_update_node, on null child to try_insert_node, otherwise recurses into the child.
906 template <typename K, typename Compare, typename Func>
907 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
909 assert( gc::is_locked() );
910 assert( nVersion != node_type::unlinked );
911 CDS_UNUSED( pParent );
913 int nCmp = cmp( key, pNode->m_key );
915 if ( nFlags & update_flags::allow_update ) {
916 return try_update_node( funcUpdate, pNode, disp );
918 return update_flags::failed;
923 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
924 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
925 m_stat.onUpdateRetry();
926 return update_flags::retry;
929 if ( pChild == nullptr ) {
931 if ( nFlags & update_flags::allow_insert )
932 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
934 result = update_flags::failed;
938 result = update_flags::retry;
939 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
940 if ( nChildVersion & node_type::shrinking ) {
941 m_stat.onUpdateWaitShrinking();
942 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
945 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
946 // this second read is important, because it is protected by nChildVersion
948 // validate the read that our caller took to get to node
949 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
950 m_stat.onUpdateRetry();
951 return update_flags::retry;
954 // At this point we know that the traversal our parent took to get to node is still valid.
955 // The recursive implementation will validate the traversal from node to
956 // child, so just prior to the node nVersion validation both traversals were definitely okay.
957 // This means that we are no longer vulnerable to node shrinks, and we don't need
958 // to validate node version any more.
959 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
962 } while ( result == update_flags::retry );
// try_remove: mirror of try_update for deletion — on key match delegates to
// try_remove_node, otherwise descends with the same version-validation protocol.
966 template <typename K, typename Compare, typename Func>
967 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
969 assert( gc::is_locked() );
970 assert( nVersion != node_type::unlinked );
972 int nCmp = cmp( key, pNode->m_key );
974 return try_remove_node( pParent, pNode, nVersion, func, disp );
978 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
979 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
980 m_stat.onRemoveRetry();
981 return update_flags::retry;
984 if ( pChild == nullptr ) {
985 return update_flags::failed;
989 result = update_flags::retry;
990 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
991 if ( nChildVersion & node_type::shrinking ) {
992 m_stat.onRemoveWaitShrinking();
993 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
996 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
997 // this second read is important, because it is protected by nChildVersion
999 // validate the read that our caller took to get to node
1000 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1001 m_stat.onRemoveRetry();
1002 return update_flags::retry;
1005 // At this point we know that the traversal our parent took to get to node is still valid.
1006 // The recursive implementation will validate the traversal from node to
1007 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1008 // This means that we are no longer vulnerable to node shrinks, and we don't need
1009 // to validate node version any more.
1010 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1013 } while ( result == update_flags::retry );
// try_extract_minmax: walks in a fixed direction (nDir: left=min, right=max)
// until a node without a child in that direction is found, then removes it.
1017 template <typename Func>
1018 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1020 assert( gc::is_locked() );
1021 assert( nVersion != node_type::unlinked );
1025 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
1026 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1027 m_stat.onRemoveRetry();
1028 return update_flags::retry;
1031 if ( pChild == nullptr ) {
1033 return try_remove_node( pParent, pNode, nVersion, func, disp );
1036 result = update_flags::retry;
1037 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1038 if ( nChildVersion & node_type::shrinking ) {
1039 m_stat.onRemoveWaitShrinking();
1040 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1043 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1044 // this second read is important, because it is protected by nChildVersion
1046 // validate the read that our caller took to get to node
1047 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1048 m_stat.onRemoveRetry();
1049 return update_flags::retry;
1052 // At this point we know that the traversal our parent took to get to node is still valid.
1053 // The recursive implementation will validate the traversal from node to
1054 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1055 // This means that we are no longer vulnerable to node shrinks, and we don't need
1056 // to validate node version any more.
1057 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1060 } while ( result == update_flags::retry );
// try_insert_node: creates a new leaf as pNode's nDir child. With relaxed insert the
// node is allocated before taking the lock (and freed if validation fails); otherwise
// it is allocated under the lock.
1064 template <typename K, typename Func>
1065 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
1069 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1070 mapped_type pVal = funcUpdate( pNew );
1071 assert( pVal != nullptr );
1072 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1075 if ( c_bRelaxedInsert ) {
// Pre-validate before allocating outside the lock.
1076 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1077 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1079 m_stat.onInsertRetry();
1080 return update_flags::retry;
1083 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1086 node_type * pDamaged;
1088 assert( pNode != nullptr );
1089 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version unchanged and the slot still empty.
1091 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1092 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1094 if ( c_bRelaxedInsert ) {
1096 m_stat.onRelaxedInsertFailed();
1099 m_stat.onInsertRetry();
1100 return update_flags::retry;
1103 if ( !c_bRelaxedInsert )
1104 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1106 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1107 pDamaged = fix_height_locked( pNode );
1111 m_stat.onInsertSuccess();
// Rebalance upward from the first height-damaged ancestor.
1113 fix_height_and_rebalance( pDamaged, disp );
1115 return update_flags::result_inserted;
// try_update_node: replaces the value of an existing node under its lock; the old
// value (if any) is retired through the disposer.
1118 template <typename Func>
1119 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1122 assert( pNode != nullptr );
1124 node_scoped_lock l( m_Monitor, *pNode );
1126 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1127 m_stat.onUpdateUnlinked();
1128 return update_flags::retry;
1131 pOld = pNode->value( memory_model::memory_order_relaxed );
1132 mapped_type pVal = funcUpdate( pNode );
1136 assert( pVal != nullptr );
1137 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1142 disp.dispose_value(pOld);
1143 m_stat.onDisposeValue();
1146 m_stat.onUpdateSuccess();
1147 return update_flags::result_updated;
// try_remove_node: removes pNode's value. If the node has at most one child it is
// physically unlinked (parent and node locked, in that order); a routing node with
// two children only has its value cleared. func(pOld, disp) returns true when it
// disposed the value (erase) or false when it extracted it (extract).
1150 template <typename Func>
1151 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1153 assert( pParent != nullptr );
1154 assert( pNode != nullptr );
1156 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1157 return update_flags::failed;
1159 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1160 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1162 node_type * pDamaged;
// Lock order: parent first, then node.
1165 node_scoped_lock lp( m_Monitor, *pParent );
1166 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1167 return update_flags::retry;
1170 node_scoped_lock ln( m_Monitor, *pNode );
1171 pOld = pNode->value( memory_model::memory_order_relaxed );
1172 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1174 && try_unlink_locked( pParent, pNode, disp )))
1176 return update_flags::retry;
1179 pDamaged = fix_height_locked( pParent );
1183 if ( func( pOld, disp )) // calls pOld disposer inside
1184 m_stat.onDisposeValue();
1186 m_stat.onExtractValue();
1188 fix_height_and_rebalance( pDamaged, disp );
1189 return update_flags::result_removed;
// Two-children case: just clear the value, keeping the node as a routing node.
1192 int result = update_flags::retry;
1195 node_scoped_lock ln( m_Monitor, *pNode );
1196 pOld = pNode->value( atomics::memory_order_relaxed );
1197 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1198 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1199 result = update_flags::result_removed;
1203 if ( result == update_flags::result_removed ) {
1205 if ( func( pOld, disp )) // calls pOld disposer inside
1206 m_stat.onDisposeValue();
1208 m_stat.onExtractValue();
// Physically splices \p pNode out of the tree, replacing it in \p pParent by
// pNode's single child (or nullptr). The caller must hold the locks of both
// pParent and pNode. Returns false (in elided lines) when the structure
// changed and splicing is impossible: pNode is no longer a child of pParent,
// or it now has two children. On success the node is marked unlinked and
// retired via the RCU disposer.
1215 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1217 // pParent and pNode must be locked
1218 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1220 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1221 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
// Re-validate under the locks: a concurrent rotation may have re-parented pNode
1222 if ( pNode != pParentLeft && pNode != pParentRight ) {
1223 // node is no longer a child of parent
1227 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1228 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1230 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1231 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// With two children, unlink requires a successor swap, not a splice — bail out
1232 if ( pLeft != nullptr && pRight != nullptr ) {
1233 // splicing is no longer possible
// pSplice is pNode's only child (may be nullptr when pNode is a leaf)
1236 node_type * pSplice = pLeft ? pLeft : pRight;
1238 if ( pParentLeft == pNode )
1239 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1241 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// release: publish the new parent link to lock-free readers
1244 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1246 // Mark the node as unlinked
1247 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1249 // The value will be disposed by calling function
1250 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// Node memory is reclaimed only after a grace period, via the RCU disposer
1252 disp.dispose( pNode );
1253 m_stat.onDisposeNode();
1260 private: // rotations
1262 int estimate_node_condition( node_type * pNode )
1264 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1265 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1267 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1268 return unlink_required;
1270 int h = pNode->height( memory_model::memory_order_relaxed );
1271 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1272 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1274 int hNew = 1 + std::max( hL, hR );
1275 int nBalance = hL - hR;
1277 if ( nBalance < -1 || nBalance > 1 )
1278 return rebalance_required;
1280 return h != hNew ? hNew : nothing_required;
// Locking wrapper around fix_height_locked(): acquires \p pNode's lock and
// repairs its cached height. Returns the next node needing repair (see
// fix_height_locked for the return protocol).
1283 node_type * fix_height( node_type * pNode )
1285 assert( pNode != nullptr );
1286 node_scoped_lock l( m_Monitor, *pNode );
1287 return fix_height_locked( pNode );
// Repairs the cached height of \p pNode (which must be locked) based on
// estimate_node_condition(). When a plain height correction suffices, it
// stores the new height and returns the parent as the next damaged node.
// NOTE(review): the listing elides the switch header and the returns for the
// rebalance_required / unlink_required / nothing_required cases — verify the
// dispatch against the full file.
1290 node_type * fix_height_locked( node_type * pNode )
1292 // pNode must be locked!!!
1293 int h = estimate_node_condition( pNode );
1295 case rebalance_required:
1296 case unlink_required:
1298 case nothing_required:
// Default case: h is the corrected height; store it and propagate upward
1301 pNode->height( h, memory_model::memory_order_relaxed );
1302 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from \p pNode repairing heights and performing rotations until no
// more damage remains. The loop stops at the root sentinel (a node whose
// parent is null) or when the current node is clean/unlinked. For a rotation,
// the parent is locked first, the parent/child link is re-validated under that
// lock, then the node itself is locked — the same hand-over-hand order used by
// the removal path.
1306 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1308 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1309 int nCond = estimate_node_condition( pNode );
// Clean or already unlinked: nothing to propagate (loop exit in elided line)
1310 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// Pure height damage — cheap fix under the node's own lock only
1313 if ( nCond != unlink_required && nCond != rebalance_required )
1314 pNode = fix_height( pNode );
1316 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1317 assert( pParent != nullptr );
1319 node_scoped_lock lp( m_Monitor, *pParent );
// Re-validate the link after locking the parent; otherwise retry the loop
1320 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1321 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1323 node_scoped_lock ln( m_Monitor, *pNode );
// rebalance_locked returns the next damaged node (or nullptr when done)
1324 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs \p pNode while holding the locks of both \p pParent and \p pNode:
// unlinks an unnecessary routing node, rotates when the AVL balance is broken,
// or simply corrects the cached height. Returns the next damaged node, or
// nullptr when no more rebalancing is necessary (per the comment below).
1331 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1333 // pParent and pNode should be locked.
1334 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1335 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1336 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1337 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1339 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1340 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Routing node with at most one child: splice it out instead of rotating
1342 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1343 if ( try_unlink_locked( pParent, pNode, disp ))
1344 return fix_height_locked( pParent );
1346 // retry needed for pNode
1351 int h = pNode->height( memory_model::memory_order_relaxed );
1352 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1353 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1354 int hNew = 1 + std::max( hL, hR );
1355 int balance = hL - hR;
// Left-heavy (balance > 1, condition in elided line) → rotate right
1358 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
// Right-heavy → rotate left
1359 else if ( balance < -1 )
1360 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
// Balanced but the cached height is stale — just fix heights upward
1361 else if ( hNew != h ) {
1362 pNode->height( hNew, memory_model::memory_order_relaxed );
1364 // pParent is already locked
1365 return fix_height_locked( pParent );
// pNode's left subtree is too tall: performs a right rotation, possibly
// preceded by a left rotation of pLeft (double rotation), choosing per the
// comments below. Locks pLeft (and pLRight when needed) on top of the already
// held pParent/pNode locks, re-validating each child link after locking.
// Returns pNode to signal "retry" when validation fails, otherwise delegates
// to the rotate_* helpers which return the next damaged node.
1371 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1373 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1374 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1375 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1377 // pParent and pNode is locked yet
1378 // pNode->pLeft is too large, we will rotate-right.
1379 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1382 assert( pLeft != nullptr );
1383 node_scoped_lock l( m_Monitor, *pLeft );
// Re-check the left link under pLeft's lock — a concurrent rotation may
// have replaced it
1384 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1385 return pNode; // retry for pNode
// The imbalance may already be gone (condition in elided line)
1387 int hL = pLeft->height( memory_model::memory_order_relaxed );
1389 return pNode; // retry
1391 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1392 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1393 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1394 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Single right rotation suffices (condition in elided line: hLL >= hLR)
1398 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1401 assert( pLRight != nullptr );
1403 node_scoped_lock lr( m_Monitor, *pLRight );
1404 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1405 return pNode; // retry
// Heights may have changed since read above; re-read under the lock
1407 hLR = pLRight->height( memory_model::memory_order_relaxed );
1409 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1411 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1412 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1413 int balance = hLL - hLRL;
// Double rotation only if it will not leave pLeft damaged afterwards
1414 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1415 // nParent.child.left won't be damaged after a double rotation
1416 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1420 // focus on pLeft, if necessary pNode will be balanced later
1421 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror image of rebalance_to_right_locked(): pNode's right subtree is too
// tall, so rotate left — possibly as a double rotation via pRight's left
// child. Same validate-after-lock discipline; returning pNode means "retry".
1426 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1428 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1429 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1430 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1432 // pParent and pNode is locked yet
1434 assert( pRight != nullptr );
1435 node_scoped_lock l( m_Monitor, *pRight );
// Re-validate the right link under pRight's lock
1436 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1437 return pNode; // retry for pNode
1439 int hR = pRight->height( memory_model::memory_order_relaxed );
// The imbalance resolved itself concurrently — nothing to rotate
1440 if ( hL - hR >= -1 )
1441 return pNode; // retry
1443 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1444 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1445 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1446 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Single left rotation suffices (condition in elided line: hRR >= hRL)
1448 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1451 assert( pRLeft != nullptr );
1452 node_scoped_lock lrl( m_Monitor, *pRLeft );
1453 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1454 return pNode; // retry
// Re-read the height under pRLeft's lock
1456 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1458 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1460 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1461 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1462 int balance = hRR - hRLR;
// Double rotation only if pRight will not end up damaged by it
1463 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1464 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise rebalance pRight first; pNode will be revisited later if needed
1466 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks \p pNode as "shrinking" by OR-ing the shrinking flag into its version
// word (release order publishes the flag to optimistic readers, which must
// retry while a shrink is in progress).
1470 static void begin_change( node_type * pNode, version_type version )
1472 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a shrink started by begin_change(): clears the flag bits and bumps
// the version so optimistic readers that observed the old version revalidate.
1474 static void end_change( node_type * pNode, version_type version )
1476 // Clear shrinking and unlinked flags and increment version
1477 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the parent of pNode; pLeft's former
// right child (pLRight) becomes pNode's left child. The rotation is bracketed
// by begin_change/end_change on pNode's version so concurrent optimistic
// readers retry. Caller holds the locks of pParent, pNode and pLeft.
// Returns the next damaged node to repair (elided lines return pNode or pLeft
// for the cases flagged in the comments below).
1480 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1482 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1483 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1485 begin_change( pNode, nodeVersion );
// Relink pNode: its new left child is pLeft's former right subtree
1487 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1488 if ( pLRight != nullptr )
1489 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pLeft takes pNode as its right child
1491 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1492 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// Attach pLeft to pParent in pNode's former slot
1494 if ( pParentLeft == pNode )
1495 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1497 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1498 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1500 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1502 // fix up heights links
1503 int hNode = 1 + std::max( hLR, hR );
1504 pNode->height( hNode, memory_model::memory_order_relaxed );
1505 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1507 end_change( pNode, nodeVersion );
1508 m_stat.onRotateRight();
1510 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1511 // parent.child). pNode is the deepest. Perform as many fixes as we can
1512 // with the locks we've got.
1514 // We've already fixed the height for pNode, but it might still be outside
1515 // our allowable balance range. In that case a simple fix_height_locked()
1517 int nodeBalance = hLR - hR;
1518 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1519 // we need another rotation at pNode
1523 // we've fixed balance and height damage for pNode, now handle
1524 // extra-routing node damage
1525 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1526 // we need to remove pNode and then repair
1530 // we've already fixed the height at pLeft, do we need a rotation here?
1531 int leftBalance = hLL - hNode;
1532 if ( leftBalance < -1 || leftBalance > 1 )
1535 // pLeft might also have routing node damage (if pLeft.left was null)
1536 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1539 // try to fix the parent height while we've still got the lock
1540 return fix_height_locked( pParent );
// Single left rotation — the mirror of rotate_right_locked(): pRight becomes
// the parent of pNode; pRight's former left child (pRLeft) becomes pNode's
// right child. Bracketed by begin_change/end_change on pNode's version.
// Caller holds the locks of pParent, pNode and pRight. Returns the next
// damaged node (elided lines return pNode or pRight for the flagged cases).
1543 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1545 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1546 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1548 begin_change( pNode, nodeVersion );
1550 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1551 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1552 if ( pRLeft != nullptr )
1553 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pRight takes pNode as its left child
1555 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1556 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// Attach pRight to pParent in pNode's former slot
1558 if ( pParentLeft == pNode )
1559 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1561 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1562 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1564 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: pNode first, then its new parent
1567 int hNode = 1 + std::max( hL, hRL );
1568 pNode->height( hNode, memory_model::memory_order_relaxed );
1569 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1571 end_change( pNode, nodeVersion );
1572 m_stat.onRotateLeft();
// Post-rotation damage checks (same protocol as rotate_right_locked)
1574 int nodeBalance = hRL - hL;
1575 if ( nodeBalance < -1 || nodeBalance > 1 )
// pNode may have become an unnecessary routing node
1578 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1581 int rightBalance = hRR - hNode;
1582 if ( rightBalance < -1 || rightBalance > 1 )
// pRight may have routing-node damage too
1585 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1588 return fix_height_locked( pParent );
// Double rotation (left-right case): pLRight — the right child of pLeft — is
// lifted above both pLeft and pNode, taking pLeft as its left child and pNode
// as its right child. Both pNode's and pLeft's versions are bracketed with
// begin_change/end_change so optimistic readers of either retry. Caller holds
// the locks of pParent, pNode, pLeft and pLRight. Returns the next damaged
// node (elided lines handle the rotation/unlink follow-ups flagged below).
1591 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1593 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1594 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1596 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1597 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1598 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1599 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1601 begin_change( pNode, nodeVersion );
1602 begin_change( pLeft, leftVersion );
1604 // fix up pNode links, careful about the order!
// pLRight's subtrees are split between pNode (right one) and pLeft (left one)
1605 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1606 if ( pLRR != nullptr )
1607 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1609 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1610 if ( pLRL != nullptr )
1611 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the new subtree root with pLeft and pNode as children
1613 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1614 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1615 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1616 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// Attach pLRight to pParent in pNode's former slot (branch header elided)
1619 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1621 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1622 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1624 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up
1627 int hNode = 1 + std::max( hLRR, hR );
1628 pNode->height( hNode, memory_model::memory_order_relaxed );
1629 int hLeft = 1 + std::max( hLL, hLRL );
1630 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1631 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1633 end_change( pNode, nodeVersion );
1634 end_change( pLeft, leftVersion );
1635 m_stat.onRotateRightOverLeft();
1637 // caller should have performed only a single rotation if pLeft was going
1638 // to end up damaged
1639 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1640 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1642 // We have damaged pParent, pLR (now parent.child), and pNode (now
1643 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1644 // can with the locks we've got.
1646 // We've already fixed the height for pNode, but it might still be outside
1647 // our allowable balance range. In that case a simple fix_height_locked()
1649 int nodeBalance = hLRR - hR;
1650 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1651 // we need another rotation at pNode
1655 // pNode might also be damaged by being an unnecessary routing node
1656 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1657 // repair involves splicing out pNode and maybe more rotations
1661 // we've already fixed the height at pLRight, do we need a rotation here?
1662 int balanceLR = hLeft - hNode;
1663 if ( balanceLR < -1 || balanceLR > 1 )
1666 // try to fix the parent height while we've still got the lock
1667 return fix_height_locked( pParent );
// Double rotation (right-left case) — mirror of rotate_right_over_left_locked:
// pRLeft, the left child of pRight, is lifted above both pNode and pRight,
// taking pNode as its left child and pRight as its right child. Both pNode's
// and pRight's versions are bracketed with begin_change/end_change. Caller
// holds the locks of pParent, pNode, pRight and pRLeft. Returns the next
// damaged node (elided lines handle the flagged follow-up repairs).
1670 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1672 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1673 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1675 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1676 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1677 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1678 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1680 begin_change( pNode, nodeVersion );
1681 begin_change( pRight, rightVersion );
1683 // fix up pNode links, careful about the order!
// pRLeft's subtrees are split between pNode (left one) and pRight (right one)
1684 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1685 if ( pRLL != nullptr )
1686 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1688 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1689 if ( pRLR != nullptr )
1690 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the new subtree root with pNode and pRight as children
1692 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1693 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1694 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1695 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// Attach pRLeft to pParent in pNode's former slot (branch header elided)
1698 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1700 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1701 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1703 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up
1706 int hNode = 1 + std::max( hL, hRLL );
1707 pNode->height( hNode, memory_model::memory_order_relaxed );
1708 int hRight = 1 + std::max( hRLR, hRR );
1709 pRight->height( hRight, memory_model::memory_order_relaxed );
1710 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1712 end_change( pNode, nodeVersion );
1713 end_change( pRight, rightVersion );
1714 m_stat.onRotateLeftOverRight();
// Caller should have done a single rotation if pRight would end up damaged
1716 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage checks (same protocol as the other rotations)
1718 int nodeBalance = hRLL - hL;
1719 if ( nodeBalance < -1 || nodeBalance > 1 )
// pNode may have become an unnecessary routing node
1721 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1724 int balRL = hRight - hNode;
1725 if ( balRL < -1 || balRL > 1 )
1728 return fix_height_locked( pParent );
1733 }} // namespace cds::container
1735 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H