3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <memory> // unique_ptr
7 #include <type_traits> // is_base_of
8 #include <cds/container/details/bronson_avltree_base.h>
9 #include <cds/urcu/details/check_deadlock.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
18 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
19 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
20 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
21 the disposer functor provided by the \p Traits template parameter.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in the tree's nodes. Note that the specialization stores a pointer to the user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include the appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
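For example, the following declares a map from \p int to pointers to a user-defined structure
(a sketch; \p my_value, \p my_value_disposer and \p my_traits are illustrative names, not part of the library):
\code
#include <cds/urcu/general_buffered.h>  // RCU implementation must be included first
#include <cds/container/bronson_avltree_map_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

// User-allocated value
struct my_value {
    int data;
};

// The disposer called to free a removed value
struct my_value_disposer {
    void operator()( my_value * p ) const { delete p; }
};

struct my_traits: public cds::container::bronson_avltree::traits
{
    typedef my_value_disposer disposer;
};

typedef cds::container::BronsonAVLTreeMap< rcu_type, int, my_value *, my_traits > map_type;
\endcode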
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
67 /// Whether @ref bronson_avltree::relaxed_insert "relaxed insertion" is enabled
68 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
70 /// Smart pointer to the \p mapped_type of an extracted node, returned by the \p extract() functions
71 typedef std::unique_ptr< T, disposer > unique_ptr;
73 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
77 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
78 typedef typename node_type::version_type version_type;
80 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
81 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
83 static CDS_CONSTEXPR bool const c_bRetiringValue = std::is_base_of< bronson_avltree::value, typename std::remove_pointer<mapped_type>::type>::value;
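// Note: c_bRetiringValue above is true when the user's value type derives from bronson_avltree::value,
// e.g. (an illustrative, hypothetical type):
//      struct my_retired_value: public bronson_avltree::value {
//          int data;
//      };
// In that case removed values are linked into a retired-value list and reclaimed via
// gc::retire_ptr() (see rcu_value_disposer<true> below).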
85 enum class find_result
102 result_inserted = allow_insert,
103 result_updated = allow_update,
110 nothing_required = -3,
111 rebalance_required = -2,
120 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
125 template <typename K>
126 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
128 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
131 static void free_node( node_type * pNode )
133 // Free node without disposer
134 cxx_allocator().Delete( pNode );
137 static void free_value( mapped_type pVal )
142 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
144 return pNode->child( nDir ).load( order );
147 static node_type * parent( node_type * pNode, atomics::memory_order order )
149 return pNode->m_pParent.load( order );
153 template <bool Enabled>
154 class rcu_value_disposer;
157 class rcu_value_disposer<true>
159 bronson_avltree::value * m_pValueRetiredList; ///< head of retired value list
162 : m_pValueRetiredList(nullptr)
165 ~rcu_value_disposer()
170 void dispose( mapped_type pVal )
173 static_cast< bronson_avltree::value *>(pVal)->m_pNextRetired = m_pValueRetiredList;
174 m_pValueRetiredList = static_cast< bronson_avltree::value *>(pVal);
178 struct internal_disposer
180 void operator()( bronson_avltree::value * p ) const
182 free_value( static_cast<mapped_type>( p ));
188 // TODO: use RCU::batch_retire
189 for ( auto p = m_pValueRetiredList; p; ) {
190 auto pNext = p->m_pNextRetired;
191 gc::template retire_ptr<internal_disposer>( p );
198 class rcu_value_disposer<false>
201 void dispose( mapped_type pVal )
210 node_type * m_pRetiredList; ///< head of retired node list
211 rcu_value_disposer< c_bRetiringValue > m_RetiredValueList;
215 : m_pRetiredList( nullptr )
223 void dispose( node_type * pNode )
225 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
226 pNode->m_pNextRemoved = m_pRetiredList;
227 m_pRetiredList = pNode;
230 void dispose_value( mapped_type pVal )
232 m_RetiredValueList.dispose( pVal );
236 struct internal_disposer
238 void operator()( node_type * p ) const
246 assert( !gc::is_locked() );
248 // TODO: use RCU::batch_retire
251 for ( node_type * p = m_pRetiredList; p; ) {
252 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
253 // Value already disposed
254 gc::template retire_ptr<internal_disposer>( p );
264 typename node_type::base_class m_Root;
266 item_counter m_ItemCounter;
267 mutable sync_monitor m_Monitor;
272 /// Creates an empty map
274 : m_pRoot( static_cast<node_type *>( &m_Root ))
285 The \p key_type should be constructible from a value of type \p K.
287 RCU \p synchronize() can be called. RCU should not be locked.
289 Returns \p true if the insertion is successful, \p false otherwise.
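A usage sketch (using the illustrative \p map_type and \p my_value from the class-level example above):
\code
map_type m;
my_value * p = new my_value;
p->data = 42;
if ( !m.insert( 10, p )) {
    // the key already exists; the map did not take ownership of p
    delete p;
}
\endcode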
291 template <typename K>
292 bool insert( K const& key, mapped_type pVal )
294 return do_update(key, key_comparator(),
295 [pVal]( node_type * pNode ) -> mapped_type
297 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
300 update_flags::allow_insert
301 ) == update_flags::result_inserted;
304 /// Updates the value for \p key
306 The operation inserts a new value or updates the existing value for \p key.
307 If \p bInsert is \p false, only updating of an existing node is possible.
309 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
310 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
311 constructible from type \p K.
312 Otherwise, the value is changed to \p pVal.
314 RCU \p synchronize() method can be called. RCU should not be locked.
316 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
317 \p second is \p true if a new node has been added or \p false if the node with \p key
318 is already in the tree.
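A sketch of update-only usage (again with the illustrative \p map_type and \p my_value):
\code
map_type m;
my_value * p = new my_value;
// bInsert == false: update only, do not insert if the key is absent
std::pair<bool, bool> res = m.update( 10, p, false );
if ( !res.first )
    delete p;   // the key was not found, p was not stored in the map
// if res.first is true, the previous value pointer has been replaced by p
// and will be passed to the disposer
\endcode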
320 template <typename K>
321 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
323 int result = do_update( key, key_comparator(),
324 [pVal]( node_type * ) -> mapped_type
328 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
330 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
333 /// Deletes \p key from the map
335 RCU \p synchronize() method can be called. RCU should not be locked.
337 Returns \p true if \p key is found and deleted, \p false otherwise
339 template <typename K>
340 bool erase( K const& key )
345 []( mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
349 /// Deletes the item from the map using \p pred predicate for searching
351 The function is an analog of \p erase(K const&)
352 but \p pred is used for key comparison.
353 The \p Less functor has an interface similar to \p std::less.
354 \p Less must imply the same element order as the comparator used for building the map.
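For example, if the map key were \p std::string (an illustrative assumption), a \p Less functor
allowing lookup by a C string might look like this:
\code
struct my_less {
    bool operator()( std::string const& lhs, std::string const& rhs ) const
    { return lhs < rhs; }
    bool operator()( std::string const& lhs, char const* rhs ) const
    { return lhs.compare( rhs ) < 0; }
    bool operator()( char const* lhs, std::string const& rhs ) const
    { return rhs.compare( lhs ) > 0; }
};
// str_map is a hypothetical BronsonAVLTreeMap with std::string keys
str_map.erase_with( "ten", my_less());
\endcode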
356 template <typename K, typename Less>
357 bool erase_with( K const& key, Less pred )
362 cds::opt::details::make_comparator_from_less<Less>(),
363 []( mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
367 /// Deletes \p key from the map
369 The function searches for an item with key \p key, calls the \p f functor
370 and deletes the item. If \p key is not found, the functor is not called.
372 The functor \p Func interface:
375 void operator()(mapped_type& item) { ... }
379 RCU \p synchronize method can be called. RCU should not be locked.
381 Returns \p true if \p key is found and deleted, \p false otherwise
383 template <typename K, typename Func>
384 bool erase( K const& key, Func f )
389 [&f]( mapped_type pVal, rcu_disposer& disp ) -> bool {
392 disp.dispose_value(pVal);
398 /// Deletes the item from the map using \p pred predicate for searching
400 The function is an analog of \p erase(K const&, Func)
401 but \p pred is used for key comparison.
402 The \p Less functor has an interface similar to \p std::less.
403 \p Less must imply the same element order as the comparator used for building the map.
405 template <typename K, typename Less, typename Func>
406 bool erase_with( K const& key, Less pred, Func f )
411 cds::opt::details::make_comparator_from_less<Less>(),
412 [&f]( mapped_type pVal, rcu_disposer& disp ) -> bool {
415 disp.dispose_value(pVal);
421 /// Extracts an item with minimal key from the map
423 Returns a \p std::unique_ptr to the leftmost item.
424 If the map is empty, returns an empty \p std::unique_ptr.
426 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
427 It means that the function gets the leftmost leaf of the tree and tries to unlink it.
428 During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
429 So, the function returns the item with the minimal key at the moment of tree traversal.
431 RCU \p synchronize method can be called. RCU should NOT be locked.
432 The function does not free the item.
433 The deallocator will be implicitly invoked when the returned object is destroyed or when
434 its \p reset(nullptr) member function is called.
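A usage sketch (with the illustrative \p map_type):
\code
map_type m;
// ... fill the map ...
map_type::unique_ptr xp = m.extract_min();
if ( xp ) {
    // use xp->data here; the disposer is invoked for the extracted value
    // when xp is destroyed
}
\endcode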
436 unique_ptr extract_min()
438 unique_ptr pExtracted;
442 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted.reset( pVal ); return false; }
447 /// Extracts an item with maximal key from the map
449 Returns a \p std::unique_ptr to the rightmost item.
450 If the map is empty, returns an empty \p std::unique_ptr.
452 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
453 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
454 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
455 So, the function returns the item with the maximal key at the moment of tree traversal.
457 RCU \p synchronize method can be called. RCU should NOT be locked.
458 The function does not free the item.
459 The deallocator will be implicitly invoked when the returned object is destroyed or when
460 its \p reset(nullptr) is called.
461 @note Before reusing the returned object you should call its \p release() method.
463 unique_ptr extract_max()
465 unique_ptr pExtracted;
469 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted.reset( pVal ); return false; }
474 /// Extracts an item from the map
476 The function searches for an item with key equal to \p key in the tree,
477 unlinks it, and returns a \p std::unique_ptr to the value found.
478 If \p key is not found, the function returns an empty \p std::unique_ptr.
480 RCU \p synchronize method can be called. RCU should NOT be locked.
481 The function does not destroy the value found.
482 The disposer will be implicitly invoked when the returned object is destroyed or when
483 its \p reset(nullptr) member function is called.
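A usage sketch (with the illustrative \p map_type):
\code
map_type m;
// ...
map_type::unique_ptr xp = m.extract( 10 );
if ( xp ) {
    // use xp->data; the disposer is called for the value when xp goes out of scope
}
\endcode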
485 template <typename Q>
486 unique_ptr extract( Q const& key )
488 unique_ptr pExtracted;
493 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted.reset( pVal ); return false; }
498 /// Extracts an item from the map using \p pred for searching
500 The function is an analog of \p extract(Q const&)
501 but \p pred is used for key comparison.
502 \p Less has an interface similar to \p std::less.
503 \p pred must imply the same element order as the comparator used for building the tree.
505 template <typename Q, typename Less>
506 unique_ptr extract_with( Q const& key, Less pred )
509 unique_ptr pExtracted;
513 cds::opt::details::make_comparator_from_less<Less>(),
514 [&pExtracted]( mapped_type pVal, rcu_disposer& ) -> bool { pExtracted.reset( pVal ); return false; }
519 /// Find the key \p key
521 The function searches for the item with key equal to \p key and calls the functor \p f for the item found.
522 The interface of \p Func functor is:
525 void operator()( key_type const& key, std::remove_pointer< mapped_type >::type& item );
528 where \p item is the item found.
529 The functor is called under the node-level lock.
531 The function applies RCU lock internally.
533 The function returns \p true if \p key is found, \p false otherwise.
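A usage sketch (with the illustrative \p map_type and \p my_value; note that the functor receives
a reference to the pointed-to value):
\code
map_type m;
// ...
int d = 0;
bool found = m.find( 10, [&d]( int const& /*key*/, my_value& item ) {
    // called under the node-level lock
    d = item.data;
});
\endcode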
535 template <typename K, typename Func>
536 bool find( K const& key, Func f )
538 return do_find( key, key_comparator(),
539 [&f]( node_type * pNode ) -> bool {
540 assert( pNode != nullptr );
541 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
543 f( pNode->m_key, *pVal );
551 /// Finds the key \p key using \p pred predicate for searching
553 The function is an analog of \p find(K const&, Func)
554 but \p pred is used for key comparison.
555 The \p Less functor has an interface similar to \p std::less.
556 \p Less must imply the same element order as the comparator used for building the map.
558 template <typename K, typename Less, typename Func>
559 bool find_with( K const& key, Less pred, Func f )
562 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
563 [&f]( node_type * pNode ) -> bool {
564 assert( pNode != nullptr );
565 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
567 f( pNode->m_key, *pVal );
575 /// Find the key \p key
577 The function searches the item with key equal to \p key
578 and returns \p true if it is found, and \p false otherwise.
580 The function applies RCU lock internally.
582 template <typename K>
583 bool find( K const& key )
585 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
588 /// Finds the key \p key using \p pred predicate for searching
590 The function is an analog of \p find(K const&)
591 but \p pred is used for key comparison.
592 The \p Less functor has an interface similar to \p std::less.
593 \p Less must imply the same element order as the comparator used for building the map.
595 template <typename K, typename Less>
596 bool find_with( K const& key, Less pred )
599 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
602 /// Clears the tree (thread safe, not atomic)
604 The function unlinks all items from the tree.
605 The function is thread-safe but not atomic: in a multi-threaded environment with parallel insertions
609 assert( set.empty() );
611 the assertion could be raised.
613 For each node the \ref disposer will be called after unlinking.
615 RCU \p synchronize method can be called. RCU should not be locked.
619 while ( extract_min() );
622 /// Clears the tree (not thread safe)
624 This function is not thread-safe and may be called only when no other thread is accessing the tree.
625 The function is used in the tree destructor.
629 clear(); // temp solution
633 /// Checks if the map is empty
636 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
639 /// Returns item count in the map
641 Only leaf nodes containing user data are counted.
643 The value returned depends on item counter type provided by \p Traits template parameter.
644 If it is \p atomicity::empty_item_counter this function always returns 0.
646 The function is not suitable for checking whether the tree is empty; use the \p empty()
647 member function for this purpose.
651 return m_ItemCounter;
654 /// Returns const reference to internal statistics
655 stat const& statistics() const
660 /// Checks internal consistency (not atomic, not thread-safe)
662 A debugging function that checks the internal consistency of the tree.
664 bool check_consistency() const
672 template <typename Q, typename Compare, typename Func>
673 bool do_find( Q& key, Compare cmp, Func f ) const
678 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
680 assert( result != find_result::retry );
681 return result == find_result::found;
684 template <typename K, typename Compare, typename Func>
685 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
687 check_deadlock_policy::check();
689 rcu_disposer removed_list;
692 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
696 template <typename K, typename Compare, typename Func>
697 bool do_remove( K const& key, Compare cmp, Func func )
699 // Func must return true if the value was disposed
700 // or false if the value was extracted
702 check_deadlock_policy::check();
704 rcu_disposer removed_list;
707 return try_remove_root( key, cmp, func, removed_list );
711 template <typename Func>
712 void do_extract_minmax( int nDir, Func func )
714 check_deadlock_policy::check();
716 rcu_disposer removed_list;
722 // get right child of root
723 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
725 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
726 if ( nChildVersion & node_type::shrinking ) {
727 m_stat.onRemoveRootWaitShrinking();
728 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
729 result = update_flags::retry;
731 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
732 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
735 } while ( result == update_flags::retry );
743 template <typename Q, typename Compare, typename Func>
744 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
746 assert( gc::is_locked() );
750 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
752 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
753 m_stat.onFindRetry();
754 return find_result::retry;
757 m_stat.onFindFailed();
758 return find_result::not_found;
761 int nCmp = cmp( key, pChild->m_key );
763 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
765 node_scoped_lock l( m_Monitor, *pChild );
766 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
768 m_stat.onFindSuccess();
769 return find_result::found;
774 m_stat.onFindFailed();
775 return find_result::not_found;
778 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
779 if ( nChildVersion & node_type::shrinking ) {
780 m_stat.onFindWaitShrinking();
781 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
783 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
784 m_stat.onFindRetry();
785 return find_result::retry;
788 else if ( nChildVersion != node_type::unlinked ) {
790 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
791 m_stat.onFindRetry();
792 return find_result::retry;
795 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
796 if ( found != find_result::retry )
802 template <typename K, typename Compare, typename Func>
803 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
805 assert( gc::is_locked() );
809 // get right child of root
810 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
812 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
813 if ( nChildVersion & node_type::shrinking ) {
814 m_stat.onUpdateRootWaitShrinking();
815 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
816 result = update_flags::retry;
818 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
819 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
824 if ( nFlags & update_flags::allow_insert ) {
825 // insert into tree as right child of the root
827 node_scoped_lock l( m_Monitor, *m_pRoot );
828 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
829 result = update_flags::retry;
833 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
834 mapped_type pVal = funcUpdate( pNew );
835 assert( pVal != nullptr );
836 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
838 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
839 m_pRoot->height( 2, memory_model::memory_order_relaxed );
843 m_stat.onInsertSuccess();
844 return update_flags::result_inserted;
847 return update_flags::failed;
849 } while ( result == update_flags::retry );
853 template <typename K, typename Compare, typename Func>
854 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
856 assert( gc::is_locked() );
860 // get right child of root
861 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
863 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
864 if ( nChildVersion & node_type::shrinking ) {
865 m_stat.onRemoveRootWaitShrinking();
866 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
867 result = update_flags::retry;
869 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
870 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
875 } while ( result == update_flags::retry );
877 return result == update_flags::result_removed;
880 template <typename K, typename Compare, typename Func>
881 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
883 assert( gc::is_locked() );
884 assert( nVersion != node_type::unlinked );
885 CDS_UNUSED( pParent );
887 int nCmp = cmp( key, pNode->m_key );
889 if ( nFlags & update_flags::allow_update ) {
890 return try_update_node( funcUpdate, pNode, disp );
892 return update_flags::failed;
897 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
898 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
899 m_stat.onUpdateRetry();
900 return update_flags::retry;
903 if ( pChild == nullptr ) {
905 if ( nFlags & update_flags::allow_insert )
906 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
908 result = update_flags::failed;
912 result = update_flags::retry;
913 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
914 if ( nChildVersion & node_type::shrinking ) {
915 m_stat.onUpdateWaitShrinking();
916 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
919 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
920 // this second read is important, because it is protected by nChildVersion
922 // validate the read that our caller took to get to node
923 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
924 m_stat.onUpdateRetry();
925 return update_flags::retry;
928 // At this point we know that the traversal our parent took to get to node is still valid.
929 // The recursive implementation will validate the traversal from node to
930 // child, so just prior to the node nVersion validation both traversals were definitely okay.
931 // This means that we are no longer vulnerable to node shrinks, and we don't need
932 // to validate node version any more.
933 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
936 } while ( result == update_flags::retry );
940 template <typename K, typename Compare, typename Func>
941 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
943 assert( gc::is_locked() );
944 assert( nVersion != node_type::unlinked );
946 int nCmp = cmp( key, pNode->m_key );
948 return try_remove_node( pParent, pNode, nVersion, func, disp );
952 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
953 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
954 m_stat.onRemoveRetry();
955 return update_flags::retry;
958 if ( pChild == nullptr ) {
959 return update_flags::failed;
963 result = update_flags::retry;
964 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
965 if ( nChildVersion & node_type::shrinking ) {
966 m_stat.onRemoveWaitShrinking();
967 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
970 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
971 // this second read is important, because it is protected by nChildVersion
973 // validate the read that our caller took to get to node
974 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
975 m_stat.onRemoveRetry();
976 return update_flags::retry;
979 // At this point we know that the traversal our parent took to get to node is still valid.
980 // The recursive implementation will validate the traversal from node to
981 // child, so just prior to the node nVersion validation both traversals were definitely okay.
982 // This means that we are no longer vulnerable to node shrinks, and we don't need
983 // to validate node version any more.
984 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
987 } while ( result == update_flags::retry );
991 template <typename Func>
992 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
994 assert( gc::is_locked() );
995 assert( nVersion != node_type::unlinked );
999 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
1000 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1001 m_stat.onRemoveRetry();
1002 return update_flags::retry;
1005 if ( pChild == nullptr ) {
1007 return try_remove_node( pParent, pNode, nVersion, func, disp );
1010 result = update_flags::retry;
1011 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1012 if ( nChildVersion & node_type::shrinking ) {
1013 m_stat.onRemoveWaitShrinking();
1014 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1017 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1018 // this second read is important, because it is protected by nChildVersion
1020 // validate the read that our caller took to get to node
1021 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1022 m_stat.onRemoveRetry();
1023 return update_flags::retry;
1026 // At this point we know that the traversal our parent took to get to node is still valid.
1027 // The recursive implementation will validate the traversal from node to
1028 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1029 // This means that we are no longer vulnerable to node shrinks, and we don't need
1030 // to validate node version any more.
1031 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1034 } while ( result == update_flags::retry );
1038 template <typename K, typename Func>
1039 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
1043 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1044 mapped_type pVal = funcUpdate( pNew );
1045 assert( pVal != nullptr );
1046 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1049 if ( c_bRelaxedInsert ) {
1050 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1051 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1053 m_stat.onInsertRetry();
1054 return update_flags::retry;
1057 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1060 node_type * pDamaged;
1062 assert( pNode != nullptr );
1063 node_scoped_lock l( m_Monitor, *pNode );
1065 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1066 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1068 if ( c_bRelaxedInsert ) {
1070 m_stat.onRelaxedInsertFailed();
1073 m_stat.onInsertRetry();
1074 return update_flags::retry;
1077 if ( !c_bRelaxedInsert )
1078 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1080 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1081 pDamaged = fix_height_locked( pNode );
1085 m_stat.onInsertSuccess();
1087 fix_height_and_rebalance( pDamaged, disp );
1089 return update_flags::result_inserted;
1092 template <typename Func>
1093 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1096 assert( pNode != nullptr );
1098 node_scoped_lock l( m_Monitor, *pNode );
1100 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1101 m_stat.onUpdateUnlinked();
1102 return update_flags::retry;
1105 pOld = pNode->value( memory_model::memory_order_relaxed );
1106 mapped_type pVal = funcUpdate( pNode );
1110 assert( pVal != nullptr );
1111 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1116 disp.dispose_value(pOld);
1117 m_stat.onDisposeValue();
1120 m_stat.onUpdateSuccess();
1121 return update_flags::result_updated;
1124 template <typename Func>
1125 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1127 assert( pParent != nullptr );
1128 assert( pNode != nullptr );
1130 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1131 return update_flags::failed;
1133 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1134 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1136 node_type * pDamaged;
1139 node_scoped_lock lp( m_Monitor, *pParent );
1140 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1141 return update_flags::retry;
1144 node_scoped_lock ln( m_Monitor, *pNode );
1145 pOld = pNode->value( memory_model::memory_order_relaxed );
1146 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1148 && try_unlink_locked( pParent, pNode, disp )))
1150 return update_flags::retry;
1153 pDamaged = fix_height_locked( pParent );
1157 if ( func( pOld, disp )) // calls pOld disposer inside
1158 m_stat.onDisposeValue();
1160 m_stat.onExtractValue();
1162 fix_height_and_rebalance( pDamaged, disp );
1163 return update_flags::result_removed;
1166 int result = update_flags::retry;
1169 node_scoped_lock ln( m_Monitor, *pNode );
1170 pOld = pNode->value( atomics::memory_order_relaxed );
1171 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1172 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1173 result = update_flags::result_removed;
1177 if ( result == update_flags::result_removed ) {
1179 if ( func( pOld, disp )) // calls pOld disposer inside
1180 m_stat.onDisposeValue();
1182 m_stat.onExtractValue();
1189 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1191 // pParent and pNode must be locked
1192 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1194 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1195 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1196 if ( pNode != pParentLeft && pNode != pParentRight ) {
1197 // node is no longer a child of parent
1201 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1202 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1204 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1205 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1206 if ( pLeft != nullptr && pRight != nullptr ) {
1207 // splicing is no longer possible
1210 node_type * pSplice = pLeft ? pLeft : pRight;
1212 if ( pParentLeft == pNode )
1213 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1215 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1218 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1220 // Mark the node as unlinked
1221 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1223 // The value will be disposed by the calling function
1224 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1226 disp.dispose( pNode );
1227 m_stat.onDisposeNode();
1234 private: // rotations
1236 int estimate_node_condition( node_type * pNode )
1238 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1239 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1241 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1242 return unlink_required;
1244 int h = pNode->height( memory_model::memory_order_relaxed );
1245 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1246 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1248 int hNew = 1 + std::max( hL, hR );
1249 int nBalance = hL - hR;
1251 if ( nBalance < -1 || nBalance > 1 )
1252 return rebalance_required;
1254 return h != hNew ? hNew : nothing_required;
1257 node_type * fix_height( node_type * pNode )
1259 assert( pNode != nullptr );
1260 node_scoped_lock l( m_Monitor, *pNode );
1261 return fix_height_locked( pNode );
1264 node_type * fix_height_locked( node_type * pNode )
1266 // pNode must be locked!!!
1267 int h = estimate_node_condition( pNode );
1269 case rebalance_required:
1270 case unlink_required:
1272 case nothing_required:
1275 pNode->height( h, memory_model::memory_order_relaxed );
1276 return parent( pNode, memory_model::memory_order_relaxed );
1280 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1282 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1283 int nCond = estimate_node_condition( pNode );
1284 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1287 if ( nCond != unlink_required && nCond != rebalance_required )
1288 pNode = fix_height( pNode );
1290 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1291 assert( pParent != nullptr );
1293 node_scoped_lock lp( m_Monitor, *pParent );
1294 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1295 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1297 node_scoped_lock ln( m_Monitor, *pNode );
1298 pNode = rebalance_locked( pParent, pNode, disp );
1305 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1307 // pParent and pNode should be locked.
1308 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1309 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1310 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1311 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1313 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1314 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1316 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1317 if ( try_unlink_locked( pParent, pNode, disp ))
1318 return fix_height_locked( pParent );
1320 // retry needed for pNode
1325 int h = pNode->height( memory_model::memory_order_relaxed );
1326 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1327 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1328 int hNew = 1 + std::max( hL, hR );
1329 int balance = hL - hR;
1332 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1333 else if ( balance < -1 )
1334 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1335 else if ( hNew != h ) {
1336 pNode->height( hNew, memory_model::memory_order_relaxed );
1338 // pParent is already locked
1339 return fix_height_locked( pParent );
1345 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1347 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1348 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1349 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1351 // pParent and pNode are already locked
1352 // pNode->pLeft is too large, we will rotate-right.
1353 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1356 assert( pLeft != nullptr );
1357 node_scoped_lock l( m_Monitor, *pLeft );
1358 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1359 return pNode; // retry for pNode
1361 int hL = pLeft->height( memory_model::memory_order_relaxed );
1363 return pNode; // retry
1365 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1366 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1367 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1368 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1372 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1375 assert( pLRight != nullptr );
1377 node_scoped_lock lr( m_Monitor, *pLRight );
1378 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1379 return pNode; // retry
1381 hLR = pLRight->height( memory_model::memory_order_relaxed );
1383 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1385 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1386 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1387 int balance = hLL - hLRL;
1388 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1389 // nParent.child.left won't be damaged after a double rotation
1390 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1394 // focus on pLeft, if necessary pNode will be balanced later
1395 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1400 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1402 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1403 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1404 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1406 // pParent and pNode are already locked
1408 assert( pRight != nullptr );
1409 node_scoped_lock l( m_Monitor, *pRight );
1410 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1411 return pNode; // retry for pNode
1413 int hR = pRight->height( memory_model::memory_order_relaxed );
1414 if ( hL - hR >= -1 )
1415 return pNode; // retry
1417 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1418 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1419 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1420 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1422 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1425 assert( pRLeft != nullptr );
1426 node_scoped_lock lrl( m_Monitor, *pRLeft );
1427 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1428 return pNode; // retry
1430 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1432 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1434 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1435 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1436 int balance = hRR - hRLR;
1437 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1438 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1440 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1444 static void begin_change( node_type * pNode, version_type version )
1446 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1448 static void end_change( node_type * pNode, version_type version )
1450 // Clear shrinking and unlinked flags and increment version
1451 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1454 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1456 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1457 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1459 begin_change( pNode, nodeVersion );
1461 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1462 if ( pLRight != nullptr )
1463 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1465 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1466 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1468 if ( pParentLeft == pNode )
1469 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1471 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1472 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1474 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1476 // fix up heights
1477 int hNode = 1 + std::max( hLR, hR );
1478 pNode->height( hNode, memory_model::memory_order_relaxed );
1479 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1481 end_change( pNode, nodeVersion );
1482 m_stat.onRotateRight();
1484 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1485 // parent.child). pNode is the deepest. Perform as many fixes as we can
1486 // with the locks we've got.
1488 // We've already fixed the height for pNode, but it might still be outside
1489 // our allowable balance range. In that case a simple fix_height_locked()
1491 int nodeBalance = hLR - hR;
1492 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1493 // we need another rotation at pNode
1497 // we've fixed balance and height damage for pNode, now handle
1498 // extra-routing node damage
1499 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1500 // we need to remove pNode and then repair
1504 // we've already fixed the height at pLeft, do we need a rotation here?
1505 int leftBalance = hLL - hNode;
1506 if ( leftBalance < -1 || leftBalance > 1 )
1509 // pLeft might also have routing node damage (if pLeft.left was null)
1510 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1513 // try to fix the parent height while we've still got the lock
1514 return fix_height_locked( pParent );
1517 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1519 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1520 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1522 begin_change( pNode, nodeVersion );
1524 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1525 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1526 if ( pRLeft != nullptr )
1527 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1529 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1530 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1532 if ( pParentLeft == pNode )
1533 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1535 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1536 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1538 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1541 int hNode = 1 + std::max( hL, hRL );
1542 pNode->height( hNode, memory_model::memory_order_relaxed );
1543 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1545 end_change( pNode, nodeVersion );
1546 m_stat.onRotateLeft();
1548 int nodeBalance = hRL - hL;
1549 if ( nodeBalance < -1 || nodeBalance > 1 )
1552 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1555 int rightBalance = hRR - hNode;
1556 if ( rightBalance < -1 || rightBalance > 1 )
1559 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1562 return fix_height_locked( pParent );
1565 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1567 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1568 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1570 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1571 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1572 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1573 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1575 begin_change( pNode, nodeVersion );
1576 begin_change( pLeft, leftVersion );
1578 // fix up pNode links, careful about the order!
1579 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1580 if ( pLRR != nullptr )
1581 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1583 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1584 if ( pLRL != nullptr )
1585 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1587 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1588 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1589 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1590 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1593 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1595 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1596 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1598 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1601 int hNode = 1 + std::max( hLRR, hR );
1602 pNode->height( hNode, memory_model::memory_order_relaxed );
1603 int hLeft = 1 + std::max( hLL, hLRL );
1604 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1605 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1607 end_change( pNode, nodeVersion );
1608 end_change( pLeft, leftVersion );
1609 m_stat.onRotateRightOverLeft();
1611 // caller should have performed only a single rotation if pLeft was going
1612 // to end up damaged
1613 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1614 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1616 // We have damaged pParent, pLR (now parent.child), and pNode (now
1617 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1618 // can with the locks we've got.
1620 // We've already fixed the height for pNode, but it might still be outside
1621 // our allowable balance range. In that case a simple fix_height_locked()
1623 int nodeBalance = hLRR - hR;
1624 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1625 // we need another rotation at pNode
1629 // pNode might also be damaged by being an unnecessary routing node
1630 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1631 // repair involves splicing out pNode and maybe more rotations
1635 // we've already fixed the height at pLRight, do we need a rotation here?
1636 int balanceLR = hLeft - hNode;
1637 if ( balanceLR < -1 || balanceLR > 1 )
1640 // try to fix the parent height while we've still got the lock
1641 return fix_height_locked( pParent );
1644 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1646 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1647 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1649 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1650 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1651 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1652 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1654 begin_change( pNode, nodeVersion );
1655 begin_change( pRight, rightVersion );
1657 // fix up pNode links, careful about the order!
1658 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1659 if ( pRLL != nullptr )
1660 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1662 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1663 if ( pRLR != nullptr )
1664 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1666 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1667 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1668 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1669 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1672 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1674 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1675 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1677 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1680 int hNode = 1 + std::max( hL, hRLL );
1681 pNode->height( hNode, memory_model::memory_order_relaxed );
1682 int hRight = 1 + std::max( hRLR, hRR );
1683 pRight->height( hRight, memory_model::memory_order_relaxed );
1684 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1686 end_change( pNode, nodeVersion );
1687 end_change( pRight, rightVersion );
1688 m_stat.onRotateLeftOverRight();
1690 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1692 int nodeBalance = hRLL - hL;
1693 if ( nodeBalance < -1 || nodeBalance > 1 )
1695 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1698 int balRL = hRight - hNode;
1699 if ( balRL < -1 || balRL > 1 )
1702 return fix_height_locked( pParent );
1707 }} // namespace cds::container
1709 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H