3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
for a "key -> value pointer" map. This specialization stores a pointer to user-allocated values instead of a copy
of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
the disposer functor provided by the \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
# ifdef CDS_DOXYGEN_INVOKED
typename Traits = bronson_avltree::traits
class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
typedef cds::urcu::gc<RCU>  gc;             ///< RCU Garbage collector
typedef Key                 key_type;       ///< type of a key stored in the map
typedef T *                 mapped_type;    ///< type of value stored in the map (pointer to user-allocated data)
typedef Traits              traits;         ///< Traits template parameter
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined key_comparator;  ///< key compare functor based on \p Traits::compare and \p Traits::less
typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
typedef typename traits::item_counter       item_counter;           ///< Item counting policy
typedef typename traits::memory_model       memory_model;           ///< Memory ordering, see \p cds::opt::memory_model option
typedef typename traits::node_allocator     node_allocator_type;    ///< allocator for maintaining internal nodes
typedef typename traits::stat               stat;                   ///< internal statistics
typedef typename traits::rcu_check_deadlock rcu_check_deadlock;     ///< Deadlock checking policy
typedef typename traits::back_off           back_off;               ///< Back-off strategy
typedef typename traits::disposer           disposer;               ///< Value disposer
typedef typename traits::sync_monitor       sync_monitor;           ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking

/// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;

/// Group of \p extract_xxx functions does not require external locking
static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;

# ifdef CDS_DOXYGEN_INVOKED
/// Returned pointer to \p mapped_type of extracted node
typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
typedef cds::urcu::exempt_ptr< gc,
    typename std::remove_pointer<mapped_type>::type,
    typename std::remove_pointer<mapped_type>::type,

typedef typename gc::scoped_lock rcu_lock;  ///< RCU scoped lock

// Internal node type: holds key, value pointer, and node-level sync-monitor data
typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
typedef typename node_type::version_type version_type;

// Allocator wrapper for internal nodes and the RCU deadlock-check policy
typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;

// Internal result/flag codes for search and update operations
enum class find_result
result_inserted = allow_insert,
result_updated = allow_update,
nothing_required = -3,
rebalance_required = -2,

// RAII lock over a single node's sync monitor
typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// Allocates a new internal node; the key is perfectly forwarded into the node constructor.
template <typename K>
static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
    return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );

// Deallocates the node memory only; the stored value pointer is NOT passed to the disposer here.
static void free_node( node_type * pNode )
    // Free node without disposer
    cxx_allocator().Delete( pNode );

// Disposes a value pointer (see traits::disposer)
static void free_value( mapped_type pVal )

// Loads the child of \p pNode selected by \p nDir with the given memory order
// (direction encoding is defined by node_type::child — see bronson_avltree_base.h)
static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
    return pNode->child( nDir ).load( order );

// Loads the parent pointer of \p pNode with the given memory order
static node_type * parent( node_type * pNode, atomics::memory_order order )
    return pNode->m_pParent.load( order );
node_type *     m_pRetiredList;     ///< head of retired node list
mapped_type     m_pRetiredValue;    ///< value retired
    : m_pRetiredList( nullptr )
    , m_pRetiredValue( nullptr )

// Links an unlinked node into the retired list for deferred RCU reclamation
void dispose( node_type * pNode )
    // a node may be retired only after its value has been detached
    assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
    pNode->m_pNextRemoved = m_pRetiredList;
    m_pRetiredList = pNode;

// Remembers a single value pointer to be retired via traits::disposer
void dispose_value( mapped_type pVal )
    // at most one value may be retired per disposer instance
    assert( m_pRetiredValue == nullptr );
    m_pRetiredValue = pVal;

// Functor that frees node memory once the RCU grace period has elapsed
struct internal_disposer
    void operator()( node_type * p ) const

// retiring must happen outside any RCU read-side critical section
assert( !gc::is_locked() );

// TODO: use RCU::batch_retire

// Hand each retired node to RCU; values were already detached (see dispose())
for ( node_type * p = m_pRetiredList; p; ) {
    node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
    // Value already disposed
    gc::template retire_ptr<internal_disposer>( p );

// Retire the detached value (if any) through the user-provided disposer
if ( m_pRetiredValue )
    gc::template retire_ptr<disposer>( m_pRetiredValue );
typename node_type::base_class m_Root;  // sentinel root; the real tree hangs off its right child (see empty(), try_update_root)
item_counter m_ItemCounter;             // leaf-node counter (policy from Traits)
mutable sync_monitor m_Monitor;         // node-level locking monitor, shared by all nodes

/// Creates empty map
    : m_pRoot( static_cast<node_type *>( &m_Root ))
The \p key_type should be constructible from a value of type \p K.

RCU \p synchronize() can be called. RCU should not be locked.

Returns \p true if inserting successful, \p false otherwise.

template <typename K>
bool insert( K const& key, mapped_type pVal )
    return do_update(key, key_comparator(),
        [pVal]( node_type * pNode ) -> mapped_type
            // on pure insert the node must not already hold a value
            assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
        update_flags::allow_insert
    ) == update_flags::result_inserted;

/// Updates the value for \p key
The operation performs inserting or updating the value for \p key with lock-free manner.
If \p bInsert is \p false, only updating of existing node is possible.

If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
constructible from type \p K.
Otherwise, the value for \p key will be changed to \p pVal.

RCU \p synchronize() method can be called. RCU should not be locked.

Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
\p second is \p true if new node has been added or \p false if the node with \p key

template <typename K>
std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
    int result = do_update( key, key_comparator(),
        [pVal]( node_type * ) -> mapped_type
        update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
    return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );

// Synonym for update( key, pVal, true )
template <typename K>
std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
    return update( key, pVal, true );
/// Delete \p key from the map
RCU \p synchronize() method can be called. RCU should not be locked.

Return \p true if \p key is found and deleted, \p false otherwise

template <typename K>
bool erase( K const& key )
    // returning true tells do_remove the value was handed to the disposer
    []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }

/// Deletes the item from the map using \p pred predicate for searching
The function is an analog of \p erase(K const&)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.

template <typename K, typename Less>
bool erase_with( K const& key, Less pred )
    cds::opt::details::make_comparator_from_less<Less>(),
    []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }

/// Delete \p key from the map
The function searches an item with key \p key, calls \p f functor
and deletes the item. If \p key is not found, the functor is not called.

The functor \p Func interface:
    void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }

RCU \p synchronize method can be called. RCU should not be locked.

Return \p true if key is found and deleted, \p false otherwise

template <typename K, typename Func>
bool erase( K const& key, Func f )
    [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
        disp.dispose_value(pVal);

/// Deletes the item from the map using \p pred predicate for searching
The function is an analog of \p erase(K const&, Func)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.

template <typename K, typename Less, typename Func>
bool erase_with( K const& key, Less pred, Func f )
    cds::opt::details::make_comparator_from_less<Less>(),
    [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
        disp.dispose_value(pVal);
/// Extracts a value with minimal key from the map
Returns \p exempt_ptr to the leftmost item.
If the tree is empty, returns empty \p exempt_ptr.

Note that the function returns only the value for minimal key.
To retrieve its key use \p extract_min( Func ) member function.

@note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
It means that the function gets leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
So, the function returns the item with minimum key at the moment of tree traversing.

RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.

exempt_ptr extract_min()
    return exempt_ptr(do_extract_min( []( key_type const& ) {}));

/// Extracts the minimal key and corresponding value
Returns \p exempt_ptr to the leftmost item.
If the tree is empty, returns empty \p exempt_ptr.

\p Func functor is used to store minimal key.
\p Func has the following signature:
    void operator()( key_type const& key );

If the tree is empty, \p f is not called.
Otherwise, it is called with the minimal key; the pointer to the corresponding value is returned

@note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
It means that the function gets leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
So, the function returns the item with minimum key at the moment of tree traversing.

RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.

template <typename Func>
exempt_ptr extract_min( Func f )
    return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));

/// Extracts the minimal key and corresponding value
This function is a shortcut for the following call:
    exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
\p key_type should be copy-assignable. The copy of minimal key
is returned in \p min_key argument.

typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
extract_min_key( key_type& min_key )
    return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));

/// Extracts a value with maximal key from the tree
Returns \p exempt_ptr pointer to the rightmost item.
If the set is empty, returns empty \p exempt_ptr.

Note that the function returns only the value for maximal key.
To retrieve its key use \p extract_max( Func ) member function.

@note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
So, the function returns the item with maximum key at the moment of tree traversing.

RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() is called.

exempt_ptr extract_max()
    return exempt_ptr(do_extract_max( []( key_type const& ) {}));

/// Extracts the maximal key and corresponding value
Returns \p exempt_ptr pointer to the rightmost item.
If the set is empty, returns empty \p exempt_ptr.

\p Func functor is used to store maximal key.
\p Func has the following signature:
    void operator()( key_type const& key );

If the tree is empty, \p f is not called.
Otherwise, it is called with the maximal key; the pointer to the corresponding value is returned

@note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
So, the function returns the item with maximum key at the moment of tree traversing.

RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not free the item.
The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() is called.

template <typename Func>
exempt_ptr extract_max( Func f )
    return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));

/// Extracts the maximal key and corresponding value
This function is a shortcut for the following call:
    exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
\p key_type should be copy-assignable. The copy of maximal key
is returned in \p max_key argument.

typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
extract_max_key( key_type& max_key )
    return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));

/// Extracts an item from the map
The function searches an item with key equal to \p key in the tree,
unlinks it, and returns \p exempt_ptr pointer to a value found.
If \p key is not found the function returns an empty \p exempt_ptr.

RCU \p synchronize method can be called. RCU should NOT be locked.
The function does not destroy the value found.
The disposer will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.

template <typename Q>
exempt_ptr extract( Q const& key )
    return exempt_ptr(do_extract( key ));

/// Extracts an item from the map using \p pred for searching
The function is an analog of \p extract(Q const&)
but \p pred is used for key compare.
\p Less has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the tree.

template <typename Q, typename Less>
exempt_ptr extract_with( Q const& key, Less pred )
    return exempt_ptr(do_extract_with( key, pred ));
/// Find the key \p key
The function searches the item with key equal to \p key and calls the functor \p f for item found.
The interface of \p Func functor is:
    void operator()( key_type const& key, mapped_type& item );
where \p item is the item found.
The functor is called under node-level lock.

The function applies RCU lock internally.

The function returns \p true if \p key is found, \p false otherwise.

template <typename K, typename Func>
bool find( K const& key, Func f )
    return do_find( key, key_comparator(),
        [&f]( node_type * pNode ) -> bool {
            assert( pNode != nullptr );
            mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
            f( pNode->m_key, *pVal );

/// Finds the key \p val using \p pred predicate for searching
The function is an analog of \p find(K const&, Func)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.

template <typename K, typename Less, typename Func>
bool find_with( K const& key, Less pred, Func f )
    return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
        [&f]( node_type * pNode ) -> bool {
            assert( pNode != nullptr );
            mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
            f( pNode->m_key, *pVal );

/// Find the key \p key
The function searches the item with key equal to \p key
and returns \p true if it is found, and \p false otherwise.

The function applies RCU lock internally.

template <typename K>
bool find( K const& key )
    return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });

/// Finds the key \p val using \p pred predicate for searching
The function is an analog of \p find(K const&)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p Less must imply the same element order as the comparator used for building the map.

template <typename K, typename Less>
bool find_with( K const& key, Less pred )
    return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
/// Clears the tree (thread safe, not atomic)
The function unlinks all items from the tree.
The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
    assert( set.empty() );
the assertion could be raised.

For each node the \ref disposer will be called after unlinking.

RCU \p synchronize method can be called. RCU should not be locked.
    // drain the tree item by item; each extraction retires its value via the disposer
    while ( extract_min() );

/// Clears the tree (not thread safe)
This function is not thread safe and may be called only when no other thread deals with the tree.
The function is used in the tree destructor.
    clear(); // temp solution

/// Checks if the map is empty
    // the sentinel root's right child is the real tree root; null means empty
    return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;

/// Returns item count in the map
Only leaf nodes containing user data are counted.

The value returned depends on item counter type provided by \p Traits template parameter.
If it is \p atomicity::empty_item_counter this function always returns 0.

The function is not suitable for checking the tree emptiness, use \p empty()
member function for this purpose.
    return m_ItemCounter;
/// Returns const reference to internal statistics
stat const& statistics() const

/// Checks internal consistency (not atomic, not thread-safe)
The debugging function to check internal consistency of the tree.

bool check_consistency() const
    // delegate to the functor overload with a no-op violation handler
    return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );

/// Checks internal consistency (not atomic, not thread-safe)
The debugging function to check internal consistency of the tree.
The functor \p Func is called if a violation of internal tree structure
    void operator()( size_t nLevel, size_t hLeft, size_t hRight );
- \p nLevel - the level where the violation is found
- \p hLeft - the height of left subtree
- \p hRight - the height of right subtree

The functor is called for each violation found.

template <typename Func>
bool check_consistency( Func f ) const
    // start from the real tree root (right child of the sentinel)
    node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
    do_check_consistency( pChild, 1, f, nErrors );

// Recursively computes subtree heights and reports AVL balance violations
// (|hLeft - hRight| > 1) to \p f; returns the subtree height.
template <typename Func>
size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
    size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
    size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );

    if ( hLeft >= hRight ) {
        if ( hLeft - hRight > 1 ) {
            f( nLevel, hLeft, hRight );
        if ( hRight - hLeft > 1 ) {
            f( nLevel, hLeft, hRight );
// Search driver: takes the RCU lock (caller-side elided here) and retries
// try_find until it returns a definite found / not_found answer.
template <typename Q, typename Compare, typename Func>
bool do_find( Q& key, Compare cmp, Func f ) const
    result = try_find( key, cmp, f, m_pRoot, 1, 0 );
    assert( result != find_result::retry );
    return result == find_result::found;

// Insert/update driver: checks for RCU deadlock, collects retired nodes/values
// into \p removed_list, and delegates to try_update_root.
template <typename K, typename Compare, typename Func>
int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
    check_deadlock_policy::check();
    rcu_disposer removed_list;
    return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );

template <typename K, typename Compare, typename Func>
bool do_remove( K const& key, Compare cmp, Func func )
    // Func must return true if the value was disposed
    // or false if the value was extracted
    check_deadlock_policy::check();
    rcu_disposer removed_list;
    return try_remove_root( key, cmp, func, removed_list );

// Extract-min driver: captures the key via \p f and the value via pExtracted;
// the functor returns false so the value is extracted, not disposed.
template <typename Func>
mapped_type do_extract_min( Func f )
    mapped_type pExtracted = nullptr;
    [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }

template <typename Func>
mapped_type do_extract_max( Func f )
    mapped_type pExtracted = nullptr;
    [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }

// Common min/max extraction loop over the root's right child; retries while
// the child is shrinking (rotation in progress) or the root's child changes.
template <typename Func>
void do_extract_minmax( int nDir, Func func )
    check_deadlock_policy::check();
    rcu_disposer removed_list;
    int result = update_flags::failed;
    // get right child of root
    node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
    version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
    if ( nChildVersion & node_type::shrinking ) {
        m_stat.onRemoveRootWaitShrinking();
        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
        result = update_flags::retry;
    else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
        result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
    } while ( result == update_flags::retry );

template <typename Q>
mapped_type do_extract( Q const& key )
    mapped_type pExtracted = nullptr;
    // returning false means "extracted, do not dispose the value"
    [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }

template <typename Q, typename Less>
mapped_type do_extract_with( Q const& key, Less pred )
    mapped_type pExtracted = nullptr;
    cds::opt::details::make_comparator_from_less<Less>(),
    [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Hand-over-hand optimistic search (Bronson et al. algorithm): descends from
// pNode in direction nDir, validating pNode's version after each child read.
// Returns found / not_found, or retry if a concurrent rotation invalidated
// the traversal and the caller must re-descend.
template <typename Q, typename Compare, typename Func>
find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
    assert( gc::is_locked() );

    node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
    // re-validate pNode after reading the child: a version change means a
    // rotation occurred and the child link may be stale
    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
        m_stat.onFindRetry();
        return find_result::retry;
    m_stat.onFindFailed();
    return find_result::not_found;

    int nCmp = cmp( key, pChild->m_key );
    if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
        // double-check the "has value" flag under the node-level lock
        node_scoped_lock l( m_Monitor, *pChild );
        if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
            m_stat.onFindSuccess();
            return find_result::found;
    m_stat.onFindFailed();
    return find_result::not_found;

    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
    if ( nChildVersion & node_type::shrinking ) {
        // the child is being rotated down: wait, then re-validate pNode
        m_stat.onFindWaitShrinking();
        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
            m_stat.onFindRetry();
            return find_result::retry;
    else if ( nChildVersion != node_type::unlinked ) {
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
            m_stat.onFindRetry();
            return find_result::retry;
        // descend one level; only a retry from the child propagates a restart
        find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
        if ( found != find_result::retry )
949 template <typename K, typename Compare, typename Func>
950 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
952 assert( gc::is_locked() );
956 // get right child of root
957 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
959 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
960 if ( nChildVersion & node_type::shrinking ) {
961 m_stat.onUpdateRootWaitShrinking();
962 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
963 result = update_flags::retry;
965 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
966 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
971 if ( nFlags & update_flags::allow_insert ) {
972 // insert into tree as right child of the root
974 node_scoped_lock l( m_Monitor, *m_pRoot );
975 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
976 result = result == update_flags::retry;
980 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
981 mapped_type pVal = funcUpdate( pNew );
982 assert( pVal != nullptr );
983 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
985 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
986 m_pRoot->height( 2, memory_model::memory_order_relaxed );
990 m_stat.onInsertSuccess();
991 return update_flags::result_inserted;
994 return update_flags::failed;
996 } while ( result == update_flags::retry );
// Removal entry point working from the sentinel root: retries while the
// root's child is shrinking or changes, then delegates to try_remove().
// Returns true iff the recursive removal reports result_removed.
template <typename K, typename Compare, typename Func>
bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
    assert( gc::is_locked() );

    // get right child of root
    node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
    version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
    if ( nChildVersion & node_type::shrinking ) {
        // rotation in progress on the child: wait, then retry
        m_stat.onRemoveRootWaitShrinking();
        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
        result = update_flags::retry;
    else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
        result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
    } while ( result == update_flags::retry );

    return result == update_flags::result_removed;
// Recursive insert/update step at pNode, whose version nVersion was read by
// the caller. Returns update_flags bits; update_flags::retry propagates a
// traversal invalidated by a concurrent rotation.
template <typename K, typename Compare, typename Func>
int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
    assert( gc::is_locked() );
    assert( nVersion != node_type::unlinked );
    CDS_UNUSED( pParent );

    int nCmp = cmp( key, pNode->m_key );
    // key matches this node: update in place if allowed
    if ( nFlags & update_flags::allow_update ) {
        return try_update_node( funcUpdate, pNode, disp );
    return update_flags::failed;

    node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
    // re-validate pNode after reading the child link
    if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
        m_stat.onUpdateRetry();
        return update_flags::retry;

    if ( pChild == nullptr ) {
        // no child on the search path: insert here if allowed
        if ( nFlags & update_flags::allow_insert )
            result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
        result = update_flags::failed;

    result = update_flags::retry;
    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
    if ( nChildVersion & node_type::shrinking ) {
        m_stat.onUpdateWaitShrinking();
        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
    else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
        // this second read is important, because it is protected by nChildVersion

        // validate the read that our caller took to get to node
        if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
            m_stat.onUpdateRetry();
            return update_flags::retry;

        // At this point we know that the traversal our parent took to get to node is still valid.
        // The recursive implementation will validate the traversal from node to
        // child, so just prior to the node nVersion validation both traversals were definitely okay.
        // This means that we are no longer vulnerable to node shrinks, and we don't need
        // to validate node version any more.
        result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
    } while ( result == update_flags::retry );
// Recursive removal step at pNode (version nVersion read by the caller).
// Mirrors try_update's validation protocol; \p func decides whether the
// value is disposed or extracted (see do_remove).
template <typename K, typename Compare, typename Func>
int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
    assert( gc::is_locked() );
    assert( nVersion != node_type::unlinked );

    int nCmp = cmp( key, pNode->m_key );
    // key matches this node: remove it
    return try_remove_node( pParent, pNode, nVersion, func, disp );

    node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
    // re-validate pNode after reading the child link
    if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
        m_stat.onRemoveRetry();
        return update_flags::retry;

    if ( pChild == nullptr ) {
        // search path ends: the key is not in the tree
        return update_flags::failed;

    result = update_flags::retry;
    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
    if ( nChildVersion & node_type::shrinking ) {
        m_stat.onRemoveWaitShrinking();
        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
    else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
        // this second read is important, because it is protected by nChildVersion

        // validate the read that our caller took to get to node
        if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
            m_stat.onRemoveRetry();
            return update_flags::retry;

        // At this point we know that the traversal our parent took to get to node is still valid.
        // The recursive implementation will validate the traversal from node to
        // child, so just prior to the node nVersion validation both traversals were definitely okay.
        // This means that we are no longer vulnerable to node shrinks, and we don't need
        // to validate node version any more.
        result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
    } while ( result == update_flags::retry );
// Extracts the extreme element of the subtree rooted at pNode: descends
// unconditionally in direction nDir (left_child for minimum, right_child for
// maximum) until there is no further child, then removes that node via
// try_remove_node(). Uses the same version-validation / shrink-wait protocol
// as try_remove() above.
// Precondition: RCU read-side lock held; pNode not unlinked.
// Returns an update_flags value; retry is absorbed by the loop below.
1138 template <typename Func>
1139 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1141 assert( gc::is_locked() );
1142 assert( nVersion != node_type::unlinked );
1146 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// Re-validate pNode after reading the child link (pairs with end_change()).
1147 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1148 m_stat.onRemoveRetry();
1149 return update_flags::retry;
1152 if ( pChild == nullptr ) {
// pNode is the extreme node in direction nDir - remove it.
1154 return try_remove_node( pParent, pNode, nVersion, func, disp );
1157 result = update_flags::retry;
1158 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1159 if ( nChildVersion & node_type::shrinking ) {
// Wait for a concurrent rotation on the child to finish before descending.
1160 m_stat.onRemoveWaitShrinking();
1161 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1164 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1165 // this second read is important, because it is protected by nChildVersion
1167 // validate the read that our caller took to get to node
1168 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1169 m_stat.onRemoveRetry();
1170 return update_flags::retry;
1173 // At this point we know that the traversal our parent took to get to node is still valid.
1174 // The recursive implementation will validate the traversal from node to
1175 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1176 // This means that we are no longer vulnerable to node shrinks, and we don't need
1177 // to validate node version any more.
1178 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1181 } while ( result == update_flags::retry );
// Attaches a brand-new leaf holding \p key as the nDir child of pNode.
// funcUpdate produces the mapped value for the freshly allocated node.
// With c_bRelaxedInsert the node is allocated and filled BEFORE taking the
// lock (optimistic); otherwise allocation happens under the lock.
// Returns update_flags::result_inserted on success or retry if pNode changed
// (version mismatch or the child slot became occupied).
1185 template <typename K, typename Func>
1186 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
1190 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1191 mapped_type pVal = funcUpdate( pNew );
// The map stores a non-null value pointer; a null here would make the node
// an (invalid) routing node at insert time.
1192 assert( pVal != nullptr );
1193 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1196 if ( c_bRelaxedInsert ) {
// Optimistic pre-check: if pNode already changed or the slot is taken,
// bail out before paying for the allocation.
1197 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1198 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1200 m_stat.onInsertRetry();
1201 return update_flags::retry;
// Relaxed mode: build the node outside the critical section.
1204 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1207 node_type * pDamaged;
1209 assert( pNode != nullptr );
1210 node_scoped_lock l( m_Monitor, *pNode );
// Re-check under the lock: the version and the empty child slot must both
// still hold, otherwise another thread raced us.
1212 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1213 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1215 if ( c_bRelaxedInsert ) {
// The optimistically built node is wasted; counted as a relaxed-insert
// failure (cleanup of pNew presumably happens here - elided in this view).
1217 m_stat.onRelaxedInsertFailed();
1220 m_stat.onInsertRetry();
1221 return update_flags::retry;
// Strict mode: allocate only now that success is guaranteed.
1224 if ( !c_bRelaxedInsert )
1225 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Publish the new leaf and record the (possibly) height-damaged ancestor.
1227 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1228 pDamaged = fix_height_locked( pNode );
1232 m_stat.onInsertSuccess();
1234 fix_height_and_rebalance( pDamaged, disp );
1236 return update_flags::result_inserted;
// Replaces the value of an existing node in place. funcUpdate computes the
// new mapped value; the previous value (if any) is handed to the rcu_disposer
// for deferred reclamation. Returns retry if the node was unlinked under us,
// otherwise update_flags::result_updated.
1239 template <typename Func>
1240 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1243 assert( pNode != nullptr );
1245 node_scoped_lock l( m_Monitor, *pNode );
// The node may have been spliced out between the caller's read and our lock.
1247 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1248 m_stat.onUpdateUnlinked();
1249 return update_flags::retry;
1252 pOld = pNode->value( memory_model::memory_order_relaxed );
1253 mapped_type pVal = funcUpdate( pNode );
1257 assert( pVal != nullptr );
1258 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Old value is retired through RCU, not freed immediately - readers may
// still be dereferencing it.
1263 disp.dispose_value(pOld);
1264 m_stat.onDisposeValue();
1267 m_stat.onUpdateSuccess();
1268 return update_flags::result_updated;
// Removes the mapping held by pNode, which is already known to carry the key.
// Two cases:
//  - pNode has at most one child: lock parent then node, re-validate, splice
//    the node out via try_unlink_locked(), and repair heights/balance.
//  - pNode has two children: it cannot be unlinked without restructuring, so
//    it is turned into a routing node by clearing its value pointer.
// In both cases \p func receives (key, old value, disposer) and reports
// whether it disposed the value (extract() keeps it alive instead).
// Returns update_flags::result_removed, failed, or retry.
//
// NOTE(review): the original used raw atomics::memory_order_relaxed in five
// places here while every sibling method goes through memory_model::.
// That bypasses the memory model selected via Traits (e.g. a
// sequential-consistent model maps "relaxed" to seq_cst); normalized to
// memory_model:: for consistency - equal under the relaxed model, strictly
// stronger (hence safe) under the sequential-consistent model.
1271 template <typename Func>
1272 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1274 assert( pParent != nullptr );
1275 assert( pNode != nullptr );
// A node without a value is a pure routing node - nothing to remove.
1277 if ( !pNode->is_valued( memory_model::memory_order_relaxed ) )
1278 return update_flags::failed;
1280 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1281 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr
1283 node_type * pDamaged;
// Lock order: parent first, then node (same order everywhere in the tree).
1286 node_scoped_lock lp( m_Monitor, *pParent );
1287 if ( pParent->is_unlinked( memory_model::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1288 return update_flags::retry;
1291 node_scoped_lock ln( m_Monitor, *pNode );
1292 pOld = pNode->value( memory_model::memory_order_relaxed );
// Both the version check and the physical unlink must succeed; otherwise
// the optimistic traversal that led here is stale.
1293 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1295 && try_unlink_locked( pParent, pNode, disp )))
1297 return update_flags::retry;
1300 pDamaged = fix_height_locked( pParent );
1304 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1305 m_stat.onDisposeValue();
1307 m_stat.onExtractValue();
1309 fix_height_and_rebalance( pDamaged, disp );
1310 return update_flags::result_removed;
// Two-children case: logical removal only - clear the value pointer and
// leave the node in the tree as a routing node.
1313 int result = update_flags::retry;
1316 node_scoped_lock ln( m_Monitor, *pNode );
1317 pOld = pNode->value( memory_model::memory_order_relaxed );
1318 if ( pNode->version( memory_model::memory_order_relaxed ) == nVersion && pOld ) {
1319 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1320 result = update_flags::result_removed;
// Dispose/extract callback runs outside the node lock.
1324 if ( result == update_flags::result_removed ) {
1326 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1327 m_stat.onDisposeValue();
1329 m_stat.onExtractValue();
// Physically splices pNode out of the tree, replacing it in its parent by its
// single child (or nullptr). Both pParent and pNode must already be locked by
// the caller. Fails (returns false) if pNode is no longer pParent's child or
// if pNode has two children (splicing impossible). On success the node is
// marked unlinked and retired through the RCU disposer.
1336 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1338 // pParent and pNode must be locked
1339 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1341 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1342 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1343 if ( pNode != pParentLeft && pNode != pParentRight ) {
1344 // node is no longer a child of parent
1348 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1349 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1351 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1352 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1353 if ( pLeft != nullptr && pRight != nullptr ) {
1354 // splicing is no longer possible
// pSplice is pNode's only child (possibly nullptr when pNode is a leaf).
1357 node_type * pSplice = pLeft ? pLeft : pRight;
1359 if ( pParentLeft == pNode )
1360 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1362 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// Release order publishes the new parent link to concurrent readers.
1365 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1367 // Mark the node as unlinked
1368 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1370 // The value will be disposed by calling function
1371 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// Node reclamation is deferred until all current RCU readers are done.
1373 disp.dispose( pNode );
1374 m_stat.onDisposeNode();
1381 private: // rotations
// Classifies what maintenance pNode needs based on a lock-free snapshot:
//  - unlink_required    : valueless routing node with <2 children,
//  - rebalance_required : AVL balance factor outside [-1, 1],
//  - nothing_required   : height is already correct,
//  - otherwise the corrected height value itself (caller stores it).
// The reads are relaxed, so the verdict is only a hint and is re-checked
// under locks by the caller.
1383 int estimate_node_condition( node_type * pNode )
1385 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1386 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1388 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1389 return unlink_required;
1391 int h = pNode->height( memory_model::memory_order_relaxed );
// Convention: a missing child contributes height 0.
1392 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1393 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1395 int hNew = 1 + std::max( hL, hR );
1396 int nBalance = hL - hR;
1398 if ( nBalance < -1 || nBalance > 1 )
1399 return rebalance_required;
// Return the new height if stale, else the "all good" sentinel.
1401 return h != hNew ? hNew : nothing_required;
// Locking wrapper around fix_height_locked(): acquires pNode's monitor lock,
// then delegates. Returns the next node whose height may be damaged
// (the parent), as reported by fix_height_locked().
1404 node_type * fix_height( node_type * pNode )
1406 assert( pNode != nullptr );
1407 node_scoped_lock l( m_Monitor, *pNode );
1408 return fix_height_locked( pNode );
// Recomputes pNode's height under the caller-held lock. Dispatches on
// estimate_node_condition(): if a rotation or unlink is needed, or nothing is
// needed, this function does not store a height (those cases are handled by
// the dispatch below - its selector statement is elided in this view); for a
// plain stale height it stores the new value and returns the parent as the
// next potentially-damaged node.
1411 node_type * fix_height_locked( node_type * pNode )
1413 // pNode must be locked!!!
1414 int h = estimate_node_condition( pNode );
1416 case rebalance_required:
1417 case unlink_required:
1419 case nothing_required:
// Default case: h is the corrected height; propagate damage upward.
1422 pNode->height( h, memory_model::memory_order_relaxed );
1423 return parent( pNode, memory_model::memory_order_relaxed );
// Walks damaged nodes up toward the root, fixing heights and performing
// rotations until nothing more is required. Stops at the root sentinel
// (node with no parent) or when the current node reports nothing_required
// or has been unlinked concurrently.
1427 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1429 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1430 int nCond = estimate_node_condition( pNode );
1431 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1434 if ( nCond != unlink_required && nCond != rebalance_required )
// Only the height is stale: fix it and continue with the parent.
1435 pNode = fix_height( pNode );
1437 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1438 assert( pParent != nullptr );
// Rotation needs both locks, parent first; re-validate the parent/child
// relationship after locking since it may have changed.
1440 node_scoped_lock lp( m_Monitor, *pParent );
1441 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1442 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1444 node_scoped_lock ln( m_Monitor, *pNode );
1445 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs pNode while holding locks on pParent and pNode:
//  - valueless node with <2 children -> try to unlink it,
//  - |balance| > 1 -> rotate right/left (which may recurse into double rotations),
//  - stale height -> store it and propagate to the parent.
// Returns the next damaged node to process, or nullptr when done.
1452 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1454 // pParent and pNode should be locked.
1455 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1456 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1457 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1458 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1460 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1461 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Routing-node cleanup takes precedence over rebalancing.
1463 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1464 if ( try_unlink_locked( pParent, pNode, disp ))
1465 return fix_height_locked( pParent );
1467 // retry needed for pNode
1472 int h = pNode->height( memory_model::memory_order_relaxed );
1473 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1474 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1475 int hNew = 1 + std::max( hL, hR );
1476 int balance = hL - hR;
// balance > 1: left-heavy; balance < -1: right-heavy.
1479 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1480 else if ( balance < -1 )
1481 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1482 else if ( hNew != h ) {
1483 pNode->height( hNew, memory_model::memory_order_relaxed );
1485 // pParent is already locked
1486 return fix_height_locked( pParent );
// Handles a left-heavy pNode (pParent, pNode already locked). Locks pLeft,
// re-validates, then picks between a single right rotation and a
// left-right double rotation depending on the shape of pLeft's subtrees.
// Returns the node that still needs work (possibly pNode itself, meaning
// "retry"), or whatever the chosen rotation reports as damaged.
1492 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1494 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1495 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1496 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1498 // pParent and pNode is locked yet
1499 // pNode->pLeft is too large, we will rotate-right.
1500 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1503 assert( pLeft != nullptr );
1504 node_scoped_lock l( m_Monitor, *pLeft );
// Our snapshot of the left child may already be stale.
1505 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1506 return pNode; // retry for pNode
1508 int hL = pLeft->height( memory_model::memory_order_relaxed );
1510 return pNode; // retry
1512 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1513 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1514 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1515 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Left-left case: a single right rotation suffices.
1519 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1522 assert( pLRight != nullptr );
// Left-right case: must also lock the inner grandchild before the
// double rotation, and re-validate it under the lock.
1524 node_scoped_lock lr( m_Monitor, *pLRight );
1525 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1526 return pNode; // retry
1528 hLR = pLRight->height( memory_model::memory_order_relaxed );
1530 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1532 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1533 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1534 int balance = hLL - hLRL;
1535 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1536 // nParent.child.left won't be damaged after a double rotation
1537 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1541 // focus on pLeft, if necessary pNode will be balanced later
1542 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror image of rebalance_to_right_locked(): handles a right-heavy pNode.
// Locks pRight, re-validates the link and heights, and chooses between a
// single left rotation and a right-left double rotation. Returns the next
// node requiring attention (pNode itself signals "retry").
1547 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1549 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1550 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1551 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1553 // pParent and pNode is locked yet
1555 assert( pRight != nullptr );
1556 node_scoped_lock l( m_Monitor, *pRight );
// Re-validate our snapshot of the right child under its lock.
1557 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1558 return pNode; // retry for pNode
1560 int hR = pRight->height( memory_model::memory_order_relaxed );
// No longer right-heavy enough to rotate - bounce back to the caller.
1561 if ( hL - hR >= -1 )
1562 return pNode; // retry
1564 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1565 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1566 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1567 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Right-right case: single left rotation.
1569 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1572 assert( pRLeft != nullptr );
// Right-left case: lock the inner grandchild and re-validate.
1573 node_scoped_lock lrl( m_Monitor, *pRLeft );
1574 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1575 return pNode; // retry
1577 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1579 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1581 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1582 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1583 int balance = hRR - hRLR;
1584 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1585 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise straighten the inner subtree first; pNode is revisited later.
1587 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as "shrinking" (rotation in progress). Readers that see this
// bit in the version word must wait; the release store pairs with readers'
// acquire loads of the version.
1591 static void begin_change( node_type * pNode, version_type version )
1593 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a shrink started by begin_change(): clears the flag bits and
// bumps the version counter so every optimistic traversal that read the old
// version will fail validation and retry.
1595 static void end_change( node_type * pNode, version_type version )
1597 // Clear shrinking and unlinked flags and increment version
1598 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes pParent's child, pNode becomes
// pLeft's right child, and pLRight (pLeft's former right subtree) moves under
// pNode. All three of pParent, pNode, pLeft are locked by the caller.
// The whole restructure is bracketed by begin_change()/end_change() on pNode
// so concurrent optimistic readers either wait or fail validation.
// Returns the next damaged node for the caller's rebalance loop.
1601 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1603 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1604 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1606 begin_change( pNode, nodeVersion );
// Re-link: pNode adopts pLRight as its left subtree...
1608 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1609 if ( pLRight != nullptr )
1610 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ...and pLeft takes pNode as its right child.
1612 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1613 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// Swing the parent pointer from pNode to pLeft on whichever side pNode was.
1615 if ( pParentLeft == pNode )
1616 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1618 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1619 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1621 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1623 // fix up heights links
1624 int hNode = 1 + std::max( hLR, hR );
1625 pNode->height( hNode, memory_model::memory_order_relaxed );
1626 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1628 end_change( pNode, nodeVersion );
1629 m_stat.onRotateRight();
1631 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1632 // parent.child). pNode is the deepest. Perform as many fixes as we can
1633 // with the locks we've got.
1635 // We've already fixed the height for pNode, but it might still be outside
1636 // our allowable balance range. In that case a simple fix_height_locked()
1638 int nodeBalance = hLR - hR;
1639 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1640 // we need another rotation at pNode
1644 // we've fixed balance and height damage for pNode, now handle
1645 // extra-routing node damage
1646 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1647 // we need to remove pNode and then repair
1651 // we've already fixed the height at pLeft, do we need a rotation here?
1652 int leftBalance = hLL - hNode;
1653 if ( leftBalance < -1 || leftBalance > 1 )
1656 // pLeft might also have routing node damage (if pLeft.left was null)
1657 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1660 // try to fix the parent height while we've still got the lock
1661 return fix_height_locked( pParent );
// Single left rotation, mirror of rotate_right_locked(): pRight replaces
// pNode under pParent, pNode becomes pRight's left child, and pRLeft moves
// under pNode. pParent, pNode, pRight are locked by the caller; the change is
// bracketed by begin_change()/end_change() on pNode. Returns the next
// damaged node.
1664 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1666 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1667 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1669 begin_change( pNode, nodeVersion );
1671 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1672 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1673 if ( pRLeft != nullptr )
1674 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1676 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1677 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// Repoint the parent's child link from pNode to pRight.
1679 if ( pParentLeft == pNode )
1680 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1682 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1683 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1685 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up: pNode first, then its new parent pRight.
1688 int hNode = 1 + std::max( hL, hRL );
1689 pNode->height( hNode, memory_model::memory_order_relaxed );
1690 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1692 end_change( pNode, nodeVersion );
1693 m_stat.onRotateLeft();
// Post-rotation damage checks, same pattern as rotate_right_locked().
1695 int nodeBalance = hRL - hL;
1696 if ( nodeBalance < -1 || nodeBalance > 1 )
1699 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1702 int rightBalance = hRR - hNode;
1703 if ( rightBalance < -1 || rightBalance > 1 )
1706 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1709 return fix_height_locked( pParent );
// Left-right double rotation: promotes pLRight (pNode.left.right) to
// pParent's child, with pLeft as its left child and pNode as its right child.
// pParent, pNode, pLeft, pLRight are all locked by the caller. Both pNode and
// pLeft shrink, so both are bracketed by begin_change()/end_change().
// Returns the next damaged node for the rebalance loop.
1712 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1714 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1715 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1717 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1718 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1719 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1720 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1722 begin_change( pNode, nodeVersion );
1723 begin_change( pLeft, leftVersion );
1725 // fix up pNode links, careful about the order!
// pLRight's subtrees are redistributed: its right goes to pNode, its left to pLeft.
1726 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1727 if ( pLRR != nullptr )
1728 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1730 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1731 if ( pLRL != nullptr )
1732 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the new subtree root with pLeft and pNode as children.
1734 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1735 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1736 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1737 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// Swing pParent's child link over to pLRight.
1740 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1742 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1743 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1745 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Heights bottom-up: the two demoted nodes first, then the new root.
1748 int hNode = 1 + std::max( hLRR, hR );
1749 pNode->height( hNode, memory_model::memory_order_relaxed );
1750 int hLeft = 1 + std::max( hLL, hLRL );
1751 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1752 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1754 end_change( pNode, nodeVersion );
1755 end_change( pLeft, leftVersion );
1756 m_stat.onRotateRightOverLeft();
1758 // caller should have performed only a single rotation if pLeft was going
1759 // to end up damaged
1760 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1761 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1763 // We have damaged pParent, pLR (now parent.child), and pNode (now
1764 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1765 // can with the locks we've got.
1767 // We've already fixed the height for pNode, but it might still be outside
1768 // our allowable balance range. In that case a simple fix_height_locked()
1770 int nodeBalance = hLRR - hR;
1771 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1772 // we need another rotation at pNode
1776 // pNode might also be damaged by being an unnecessary routing node
1777 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1778 // repair involves splicing out pNode and maybe more rotations
1782 // we've already fixed the height at pLRight, do we need a rotation here?
1783 int balanceLR = hLeft - hNode;
1784 if ( balanceLR < -1 || balanceLR > 1 )
1787 // try to fix the parent height while we've still got the lock
1788 return fix_height_locked( pParent );
// Right-left double rotation, mirror of rotate_right_over_left_locked():
// promotes pRLeft (pNode.right.left) to pParent's child, with pNode as its
// left child and pRight as its right child. pParent, pNode, pRight, pRLeft
// are locked by the caller; pNode and pRight both shrink and are bracketed by
// begin_change()/end_change(). Returns the next damaged node.
1791 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1793 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1794 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1796 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1797 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1798 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1799 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1801 begin_change( pNode, nodeVersion );
1802 begin_change( pRight, rightVersion );
1804 // fix up pNode links, careful about the order!
// Redistribute pRLeft's subtrees: its left goes to pNode, its right to pRight.
1805 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1806 if ( pRLL != nullptr )
1807 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1809 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1810 if ( pRLR != nullptr )
1811 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the new subtree root.
1813 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1814 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1815 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1816 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// Swing pParent's child link over to pRLeft.
1819 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1821 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1822 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1824 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Heights bottom-up: demoted nodes first, then the new root.
1827 int hNode = 1 + std::max( hL, hRLL );
1828 pNode->height( hNode, memory_model::memory_order_relaxed );
1829 int hRight = 1 + std::max( hRLR, hRR );
1830 pRight->height( hRight, memory_model::memory_order_relaxed );
1831 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1833 end_change( pNode, nodeVersion );
1834 end_change( pRight, rightVersion );
1835 m_stat.onRotateLeftOverRight();
// Caller guarantees pRight won't end up damaged after the double rotation.
1837 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage checks, same pattern as the single rotations.
1839 int nodeBalance = hRLL - hL;
1840 if ( nodeBalance < -1 || nodeBalance > 1 )
1842 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1845 int balRL = hRight - hNode;
1846 if ( balRL < -1 || balRL > 1 )
1849 return fix_height_locked( pParent );
1854 }} // namespace cds::container
1856 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H