#ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
#define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H

#include <type_traits> // is_base_of
#include <cds/container/details/bronson_avltree_base.h>
#include <cds/urcu/details/check_deadlock.h>
#include <cds/urcu/exempt_ptr.h>

namespace cds { namespace container {

    /// Bronson et al AVL-tree (RCU specialization for storing pointers to values)
    /** @ingroup cds_nonintrusive_map
        @ingroup cds_nonintrusive_tree
        @headerfile cds/container/bronson_avltree_map_rcu.h
        @anchor cds_container_BronsonAVLTreeMap_rcu_ptr

        This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
        for "key -> value pointer" maps. This specialization stores a pointer to the user-allocated value instead of
        a copy of the value. When a tree node is removed, the algorithm does not free the value pointer directly;
        instead, it calls the disposer functor provided by the \p Traits template parameter.

        <b>Template arguments</b>:
        - \p RCU - one of \ref cds_urcu_gc "RCU type"
        - \p Key - key type
        - \p T - value type to be stored in the tree's nodes. Note that the specialization stores a pointer
          to the user-allocated value, not a copy.
        - \p Traits - tree traits, default is \p bronson_avltree::traits.
          It is possible to declare an option-based tree with the \p bronson_avltree::make_traits metafunction
          instead of the \p Traits template argument.

        @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include the appropriate RCU header file,
        see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
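
        Example of the required include order and map declaration (an illustrative sketch;
        \p general_buffered<> is just one of the RCU implementations provided by the library,
        and \p map_type is a name introduced here for the examples below):
        \code
        #include <cds/urcu/general_buffered.h>  // the RCU implementation header comes first
        #include <cds/container/bronson_avltree_map_rcu.h>

        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

        // Map of int keys to pointers to std::string values, default traits
        typedef cds::container::BronsonAVLTreeMap< rcu_type, int, std::string* > map_type;
        \endcode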
    */
    template <
        typename RCU,
        typename Key,
        typename T,
#   ifdef CDS_DOXYGEN_INVOKED
        typename Traits = bronson_avltree::traits
#   else
        typename Traits
#   endif
    >
    class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
    {
    public:
        typedef cds::urcu::gc<RCU>  gc;             ///< RCU Garbage collector
        typedef Key                 key_type;       ///< type of a key stored in the map
        typedef T *                 mapped_type;    ///< type of value stored in the map
        typedef Traits              traits;         ///< Traits template parameter

#   ifdef CDS_DOXYGEN_INVOKED
        typedef implementation_defined key_comparator;  ///< key compare functor based on \p Traits::compare and \p Traits::less
#   else
        typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
#   endif

        typedef typename traits::item_counter       item_counter;        ///< Item counting policy
        typedef typename traits::memory_model       memory_model;        ///< Memory ordering, see \p cds::opt::memory_model option
        typedef typename traits::node_allocator     node_allocator_type; ///< allocator for maintaining internal nodes
        typedef typename traits::stat               stat;                ///< internal statistics
        typedef typename traits::rcu_check_deadlock rcu_check_deadlock;  ///< Deadlock checking policy
        typedef typename traits::back_off           back_off;            ///< Back-off strategy
        typedef typename traits::disposer           disposer;            ///< Value disposer
        typedef typename traits::sync_monitor       sync_monitor;        ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking

        /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
        static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;

        /// Group of \p extract_xxx functions does not require external locking
        static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;

#   ifdef CDS_DOXYGEN_INVOKED
        /// Pointer to \p mapped_type of the extracted node
        typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
#   else
        typedef cds::urcu::exempt_ptr< gc,
            typename std::remove_pointer<mapped_type>::type,
            typename std::remove_pointer<mapped_type>::type,
            disposer,
            void
        > exempt_ptr;
#   endif

        typedef typename gc::scoped_lock    rcu_lock;   ///< RCU scoped lock
    protected:
        typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
        typedef typename node_type::version_type version_type;

        typedef cds::details::Allocator< node_type, node_allocator_type >            cxx_allocator;
        typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock >  check_deadlock_policy;
        enum class find_result
        {
            found,
            not_found,
            retry
        };

        struct update_flags
        {
            enum {
                allow_insert = 1,
                allow_update = 2,

                retry = 1024,

                failed = 0,
                result_inserted = allow_insert,
                result_updated = allow_update,
                result_removed = 4
            };
        };

        enum node_condition
        {
            nothing_required = -3,
            rebalance_required = -2,
            unlink_required = -1
        };
        typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;

        template <typename K>
        static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
        {
            return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
        }

        static void free_node( node_type * pNode )
        {
            // Free the node without calling the disposer
            assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
            assert( pNode->m_SyncMonitorInjection.check_free());
            cxx_allocator().Delete( pNode );
        }

        static void free_value( mapped_type pVal )
        {
            disposer()( pVal );
        }

        static node_type * child( node_type * pNode, int nDir, atomics::memory_order order = memory_model::memory_order_relaxed )
        {
            return pNode->child( nDir ).load( order );
        }

        static node_type * parent( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
        {
            return pNode->m_pParent.load( order );
        }
        class rcu_disposer
        {
            node_type *     m_pRetiredList;     ///< head of retired node list
            mapped_type     m_pRetiredValue;    ///< value retired

        public:
            rcu_disposer()
                : m_pRetiredList( nullptr )
                , m_pRetiredValue( nullptr )
            {}

            void dispose( node_type * pNode )
            {
                assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
                pNode->m_pNextRemoved = m_pRetiredList;
                m_pRetiredList = pNode;
            }

            void dispose_value( mapped_type pVal )
            {
                assert( m_pRetiredValue == nullptr );
                m_pRetiredValue = pVal;
            }

        private:
            struct internal_disposer
            {
                void operator()( node_type * p ) const
                {
                    free_node( p );
                }
            };

            void clean()
            {
                assert( !gc::is_locked() );

                // TODO: use RCU::batch_retire

                for ( node_type * p = m_pRetiredList; p; ) {
                    node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
                    // Value already disposed
                    gc::template retire_ptr<internal_disposer>( p );
                    p = pNext;
                }

                if ( m_pRetiredValue )
                    gc::template retire_ptr<disposer>( m_pRetiredValue );
            }
        };
        typename node_type::base_class  m_Root;
        node_type *                     m_pRoot;
        item_counter                    m_ItemCounter;
        mutable stat                    m_stat;
        mutable sync_monitor            m_Monitor;

    public:
        /// Creates an empty map
        BronsonAVLTreeMap()
            : m_pRoot( static_cast<node_type *>( &m_Root ))
        {}

        /// Inserts a new node with \p key and value pointer \p pVal
        /**
            The \p key_type should be constructible from a value of type \p K.

            RCU \p synchronize() can be called. RCU should not be locked.

            Returns \p true if the insertion is successful, \p false otherwise.
        */
        template <typename K>
        bool insert( K const& key, mapped_type pVal )
        {
            return do_update( key, key_comparator(),
                [pVal]( node_type * pNode ) -> mapped_type
                {
                    assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
                    CDS_UNUSED( pNode );
                    return pVal;
                },
                update_flags::allow_insert
            ) == update_flags::result_inserted;
        }
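
        /*
            Usage sketch (illustrative, not part of the original documentation; assumes
            the hypothetical \p map_type typedef from the class-level example and the
            default disposer that deletes the value):
            \code
            map_type m;
            std::string * pVal = new std::string( "ten" );
            if ( !m.insert( 10, pVal ))
                delete pVal;    // insertion failed - the map did not take ownership
            \endcode
        */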
        /// Updates the value for \p key
        /**
            The operation performs inserting or updating the value for \p key in a lock-free manner.
            If \p bInsert is \p false, only updating of an existing node is possible.

            If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
            then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
            constructible from type \p K.
            Otherwise, the value for \p key will be changed to \p pVal.

            RCU \p synchronize() method can be called. RCU should not be locked.

            Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
            \p second is \p true if a new node has been added or \p false if the node with \p key
            already exists.
        */
        template <typename K>
        std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
        {
            int result = do_update( key, key_comparator(),
                [pVal]( node_type * ) -> mapped_type
                {
                    return pVal;
                },
                update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
            );
            return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
        }
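
        /*
            Usage sketch (illustrative; same assumptions as the \p insert() example above):
            \code
            map_type m;
            std::string * pVal = new std::string( "ten" );
            std::pair<bool, bool> res = m.update( 10, pVal, true );
            // res.first  - the operation succeeded
            // res.second - true if a new node was inserted,
            //              false if an existing value was replaced (the old value is disposed)
            if ( !res.first )
                delete pVal;
            \endcode
        */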
        template <typename K>
        std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
        {
            return update( key, pVal, true );
        }

        /// Deletes \p key from the map
        /**
            RCU \p synchronize() method can be called. RCU should not be locked.

            Returns \p true if \p key is found and deleted, \p false otherwise.
        */
        template <typename K>
        bool erase( K const& key )
        {
            return do_remove(
                key,
                key_comparator(),
                []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
            );
        }
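
        /*
            Usage sketch (illustrative): the disposer from \p Traits is invoked for the
            stored pointer after the RCU grace period, so the caller must not free the
            value itself:
            \code
            map_type m;
            m.insert( 10, new std::string( "ten" ));
            bool bErased = m.erase( 10 );   // the value is freed by the disposer, not by the caller
            \endcode
        */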
        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \p erase(K const&)
            but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less>
        bool erase_with( K const& key, Less pred )
        {
            return do_remove(
                key,
                cds::opt::details::make_comparator_from_less<Less>(),
                []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
            );
        }

        /// Deletes \p key from the map
        /**
            The function searches an item with key \p key, calls \p f functor
            and deletes the item. If \p key is not found, the functor is not called.

            The functor \p Func interface:
            \code
            void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val ) { ... }
            \endcode

            RCU \p synchronize method can be called. RCU should not be locked.

            Returns \p true if the key is found and deleted, \p false otherwise.
        */
        template <typename K, typename Func>
        bool erase( K const& key, Func f )
        {
            return do_remove(
                key,
                key_comparator(),
                [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
                    assert( pVal != nullptr );
                    f( key, *pVal );
                    disp.dispose_value( pVal );
                    return true;
                }
            );
        }
        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \p erase(K const&, Func)
            but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less, typename Func>
        bool erase_with( K const& key, Less pred, Func f )
        {
            return do_remove(
                key,
                cds::opt::details::make_comparator_from_less<Less>(),
                [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
                    assert( pVal != nullptr );
                    f( key, *pVal );
                    disp.dispose_value( pVal );
                    return true;
                }
            );
        }

        /// Extracts a value with minimal key from the map
        /**
            Returns \p exempt_ptr to the leftmost item.
            If the tree is empty, returns an empty \p exempt_ptr.

            Note that the function returns only the value for the minimal key.
            To retrieve its key use the \p extract_min( Func ) member function.

            @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
            It means that the function gets the leftmost leaf of the tree and tries to unlink it.
            During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
            So, the function returns the item with the minimal key at the moment of tree traversal.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not free the item.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() member function is called.
        */
        exempt_ptr extract_min()
        {
            return exempt_ptr( do_extract_min( []( key_type const& ) {} ));
        }
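
        /*
            Usage sketch (illustrative): the returned \p exempt_ptr owns the extracted
            value until it is destroyed or its \p release() member function is called:
            \code
            map_type m;
            // ... fill the map ...
            map_type::exempt_ptr xp = m.extract_min();
            if ( !xp.empty() ) {
                // use *xp here; the disposer is invoked when xp goes out of scope
            }
            \endcode
        */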
        /// Extracts the minimal key and its corresponding value
        /**
            Returns \p exempt_ptr to the leftmost item.
            If the tree is empty, returns an empty \p exempt_ptr.

            \p Func functor is used to store the minimal key.
            \p Func has the following signature:
            \code
            void operator()( key_type const& key );
            \endcode

            If the tree is empty, \p f is not called.
            Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
            as \p exempt_ptr.

            @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
            It means that the function gets the leftmost leaf of the tree and tries to unlink it.
            During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
            So, the function returns the item with the minimal key at the moment of tree traversal.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not free the item.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() member function is called.
        */
        template <typename Func>
        exempt_ptr extract_min( Func f )
        {
            return exempt_ptr( do_extract_min( [&f]( key_type const& key ) { f( key ); } ));
        }

        /// Extracts the minimal key and its corresponding value
        /**
            This function is a shortcut for the following call:
            \code
            key_type key;
            exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
            \endcode
            \p key_type should be copy-assignable. The copy of the minimal key
            is returned in the \p min_key argument.
        */
        typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
        extract_min_key( key_type& min_key )
        {
            return exempt_ptr( do_extract_min( [&min_key]( key_type const& key ) { min_key = key; } ));
        }
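
        /*
            Usage sketch (illustrative):
            \code
            map_type m;
            // ... fill the map ...
            int nMinKey;
            map_type::exempt_ptr xp = m.extract_min_key( nMinKey );
            if ( !xp.empty() ) {
                // nMinKey holds a copy of the key of the extracted item
            }
            \endcode
        */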
        /// Extracts a value with maximal key from the tree
        /**
            Returns \p exempt_ptr pointer to the rightmost item.
            If the set is empty, returns an empty \p exempt_ptr.

            Note that the function returns only the value for the maximal key.
            To retrieve its key use the \p extract_max( Func ) member function.

            @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
            It means that the function gets the rightmost leaf of the tree and tries to unlink it.
            During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
            So, the function returns the item with the maximal key at the moment of tree traversal.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not free the item.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() is called.
        */
        exempt_ptr extract_max()
        {
            return exempt_ptr( do_extract_max( []( key_type const& ) {} ));
        }

        /// Extracts the maximal key and corresponding value
        /**
            Returns \p exempt_ptr pointer to the rightmost item.
            If the set is empty, returns an empty \p exempt_ptr.

            \p Func functor is used to store the maximal key.
            \p Func has the following signature:
            \code
            void operator()( key_type const& key );
            \endcode

            If the tree is empty, \p f is not called.
            Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
            as \p exempt_ptr.

            @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
            It means that the function gets the rightmost leaf of the tree and tries to unlink it.
            During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
            So, the function returns the item with the maximal key at the moment of tree traversal.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not free the item.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() is called.
        */
        template <typename Func>
        exempt_ptr extract_max( Func f )
        {
            return exempt_ptr( do_extract_max( [&f]( key_type const& key ) { f( key ); } ));
        }

        /// Extracts the maximal key and corresponding value
        /**
            This function is a shortcut for the following call:
            \code
            key_type key;
            exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
            \endcode
            \p key_type should be copy-assignable. The copy of the maximal key
            is returned in the \p max_key argument.
        */
        typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
        extract_max_key( key_type& max_key )
        {
            return exempt_ptr( do_extract_max( [&max_key]( key_type const& key ) { max_key = key; } ));
        }
        /// Extracts an item from the map
        /**
            The function searches an item with key equal to \p key in the tree,
            unlinks it, and returns an \p exempt_ptr pointer to the value found.
            If \p key is not found the function returns an empty \p exempt_ptr.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not destroy the value found.
            The disposer will be implicitly invoked when the returned object is destroyed or when
            its \p release() member function is called.
        */
        template <typename Q>
        exempt_ptr extract( Q const& key )
        {
            return exempt_ptr( do_extract( key ));
        }
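
        /*
            Usage sketch (illustrative):
            \code
            map_type m;
            m.insert( 10, new std::string( "ten" ));
            map_type::exempt_ptr xp = m.extract( 10 );
            if ( !xp.empty() )
                std::cout << *xp << std::endl;  // the value is disposed when xp is destroyed
            \endcode
        */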
        /// Extracts an item from the map using \p pred for searching
        /**
            The function is an analog of \p extract(Q const&)
            but \p pred is used for key comparison.
            \p Less has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the tree.
        */
        template <typename Q, typename Less>
        exempt_ptr extract_with( Q const& key, Less pred )
        {
            return exempt_ptr( do_extract_with( key, pred ));
        }
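
        /*
            Usage sketch (illustrative; \p int_less is a hypothetical predicate that must
            induce the same element order as the tree's comparator):
            \code
            struct int_less {
                bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
            };

            map_type::exempt_ptr xp = m.extract_with( 10, int_less());
            \endcode
        */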
        /// Finds the key \p key
        /**
            The function searches the item with key equal to \p key and calls the functor \p f for the item found.
            The interface of \p Func functor is:
            \code
            void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& item );
            \endcode
            where \p item is the item found.
            The functor is called under the node-level lock.

            The function applies RCU lock internally.

            The function returns \p true if \p key is found, \p false otherwise.
        */
        template <typename K, typename Func>
        bool find( K const& key, Func f )
        {
            return do_find( key, key_comparator(),
                [&f]( node_type * pNode ) -> bool {
                    assert( pNode != nullptr );
                    mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
                    if ( pVal ) {
                        f( pNode->m_key, *pVal );
                        return true;
                    }
                    return false;
                }
            );
        }
        /// Finds the key \p key using \p pred predicate for searching
        /**
            The function is an analog of \p find(K const&, Func)
            but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less, typename Func>
        bool find_with( K const& key, Less pred, Func f )
        {
            return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
                [&f]( node_type * pNode ) -> bool {
                    assert( pNode != nullptr );
                    mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
                    if ( pVal ) {
                        f( pNode->m_key, *pVal );
                        return true;
                    }
                    return false;
                }
            );
        }

        /// Finds the key \p key
        /**
            The function searches the item with key equal to \p key
            and returns \p true if it is found, and \p false otherwise.

            The function applies RCU lock internally.
        */
        template <typename K>
        bool find( K const& key )
        {
            return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
        }
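
        /*
            Usage sketch (illustrative; assumes \p map_type from the class-level example).
            The functor overload is called under the node-level lock, so the functor
            should be short and must not block:
            \code
            bool bFound = m.find( 10, []( int const& key, std::string& val ) {
                std::cout << key << " -> " << val << std::endl;
            });
            bool bExists = m.find( 10 );    // presence check only
            \endcode
        */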
        /// Finds the key \p key using \p pred predicate for searching
        /**
            The function is an analog of \p find(K const&)
            but \p pred is used for key comparison.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less>
        bool find_with( K const& key, Less pred )
        {
            return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
        }

        /// Clears the tree (thread safe, not atomic)
        /**
            The function unlinks all items from the tree.
            The function is thread safe but not atomic: in a multi-threaded environment with parallel insertions,
            after the sequence
            \code
            set.clear();
            assert( set.empty() );
            \endcode
            the assertion could be raised.

            For each node the \ref disposer will be called after unlinking.

            RCU \p synchronize method can be called. RCU should not be locked.
        */
        void clear()
        {
            while ( extract_min() );
        }
        /// Clears the tree (not thread safe)
        /**
            This function is not thread safe and may be called only when no other thread deals with the tree.
            The function is used in the tree destructor.
        */
        void unsafe_clear()
        {
            clear(); // temp solution
        }

        /// Checks if the map is empty
        bool empty() const
        {
            return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
        }

        /// Returns item count in the map
        /**
            Only leaf nodes containing user data are counted.

            The value returned depends on the item counter type provided by the \p Traits template parameter.
            If it is \p atomicity::empty_item_counter, this function always returns 0.

            The function is not suitable for checking the tree emptiness; use the \p empty()
            member function for this purpose.
        */
        size_t size() const
        {
            return m_ItemCounter;
        }

        /// Returns const reference to internal statistics
        stat const& statistics() const
        {
            return m_stat;
        }

        /// Returns reference to \p sync_monitor object
        sync_monitor& monitor()
        {
            return m_Monitor;
        }
        sync_monitor const& monitor() const
        {
            return m_Monitor;
        }

        /// Checks internal consistency (not atomic, not thread-safe)
        /**
            The debugging function to check internal consistency of the tree.
        */
        bool check_consistency() const
        {
            return check_consistency( []( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
        }
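
        /*
            Usage sketch (illustrative): report every AVL balance violation found:
            \code
            bool bOk = m.check_consistency( []( size_t nLevel, size_t hLeft, size_t hRight ) {
                std::cerr << "Violation at level " << nLevel
                          << ": hLeft=" << hLeft << " hRight=" << hRight << std::endl;
            });
            \endcode
        */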
        /// Checks internal consistency (not atomic, not thread-safe)
        /**
            The debugging function to check internal consistency of the tree.
            The functor \p Func is called if a violation of the internal tree structure
            is found:
            \code
            void operator()( size_t nLevel, size_t hLeft, size_t hRight );
            \endcode
            where:
            - \p nLevel - the level where the violation is found
            - \p hLeft - the height of the left subtree
            - \p hRight - the height of the right subtree

            The functor is called for each violation found.
        */
        template <typename Func>
        bool check_consistency( Func f ) const
        {
            node_type * pChild = child( m_pRoot, right_child );
            size_t nErrors = 0;
            do_check_consistency( pChild, 1, f, nErrors );
            return nErrors == 0;
        }

    protected:
        template <typename Func>
        size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
        {
            if ( pNode ) {
                key_comparator cmp;
                node_type * pLeft = child( pNode, left_child );
                node_type * pRight = child( pNode, right_child );
                if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
                    ++nErrors;
                if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
                    ++nErrors;

                size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
                size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );

                if ( hLeft >= hRight ) {
                    if ( hLeft - hRight > 1 ) {
                        f( nLevel, hLeft, hRight );
                        ++nErrors;
                    }
                    return hLeft + 1;
                }
                else {
                    if ( hRight - hLeft > 1 ) {
                        f( nLevel, hLeft, hRight );
                        ++nErrors;
                    }
                    return hRight + 1;
                }
            }
            return 0;
        }
        template <typename Q, typename Compare, typename Func>
        bool do_find( Q& key, Compare cmp, Func f ) const
        {
            find_result result;
            {
                rcu_lock l;
                result = try_find( key, cmp, f, m_pRoot, 1, 0 );
            }
            assert( result != find_result::retry );
            return result == find_result::found;
        }

        template <typename K, typename Compare, typename Func>
        int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
        {
            check_deadlock_policy::check();

            rcu_disposer removed_list;
            {
                rcu_lock l;
                return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
            }
        }

        template <typename K, typename Compare, typename Func>
        bool do_remove( K const& key, Compare cmp, Func func )
        {
            // Func must return true if the value was disposed
            // or false if the value was extracted
            check_deadlock_policy::check();

            rcu_disposer removed_list;
            {
                rcu_lock l;
                return try_remove_root( key, cmp, func, removed_list );
            }
        }
        template <typename Func>
        mapped_type do_extract_min( Func f )
        {
            mapped_type pExtracted = nullptr;
            do_extract_minmax(
                left_child,
                [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
            );
            return pExtracted;
        }

        template <typename Func>
        mapped_type do_extract_max( Func f )
        {
            mapped_type pExtracted = nullptr;
            do_extract_minmax(
                right_child,
                [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
            );
            return pExtracted;
        }

        template <typename Func>
        void do_extract_minmax( int nDir, Func func )
        {
            check_deadlock_policy::check();

            rcu_disposer removed_list;
            {
                rcu_lock l;

                int result = update_flags::failed;
                do {
                    // get the right child of root
                    node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
                    if ( pChild ) {
                        version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                        if ( nChildVersion & node_type::shrinking ) {
                            m_stat.onRemoveRootWaitShrinking();
                            pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                            result = update_flags::retry;
                        }
                        else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
                            result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
                        }
                    }
                } while ( result == update_flags::retry );
            }
        }
        template <typename Q>
        mapped_type do_extract( Q const& key )
        {
            mapped_type pExtracted = nullptr;
            do_remove(
                key,
                key_comparator(),
                [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
            );
            return pExtracted;
        }

        template <typename Q, typename Less>
        mapped_type do_extract_with( Q const& key, Less pred )
        {
            mapped_type pExtracted = nullptr;
            do_remove(
                key,
                cds::opt::details::make_comparator_from_less<Less>(),
                [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
            );
            return pExtracted;
        }

        static int height( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
        {
            return pNode->m_nHeight.load( order );
        }
        static void set_height( node_type * pNode, int h, atomics::memory_order order = memory_model::memory_order_relaxed )
        {
            pNode->m_nHeight.store( h, order );
        }
        static int height_null( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
        {
            return pNode ? height( pNode, order ) : 0;
        }
        template <typename Q, typename Compare, typename Func>
        find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
        {
            assert( gc::is_locked() );

            while ( true ) {
                node_type * pChild = child( pNode, nDir );
                if ( !pChild ) {
                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                        m_stat.onFindRetry();
                        return find_result::retry;
                    }

                    m_stat.onFindFailed();
                    return find_result::not_found;
                }

                int nCmp = cmp( key, pChild->m_key );
                if ( nCmp == 0 ) {
                    if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
                        // key found
                        node_scoped_lock l( m_Monitor, *pChild );
                        if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
                            if ( f( pChild )) {
                                m_stat.onFindSuccess();
                                return find_result::found;
                            }
                        }
                    }

                    m_stat.onFindFailed();
                    return find_result::not_found;
                }

                version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                if ( nChildVersion & node_type::shrinking ) {
                    m_stat.onFindWaitShrinking();
                    pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );

                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                        m_stat.onFindRetry();
                        return find_result::retry;
                    }
                }
                else if ( nChildVersion != node_type::unlinked ) {
                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                        m_stat.onFindRetry();
                        return find_result::retry;
                    }

                    find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
                    if ( found != find_result::retry )
                        return found;
                }
                else if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onFindRetry();
                    return find_result::retry;
                }
            }
        }
        template <typename K, typename Compare, typename Func>
        int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
        {
            assert( gc::is_locked() );

            int result;
            do {
                // get the right child of root
                node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
                if ( pChild ) {
                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onUpdateRootWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        result = update_flags::retry;
                    }
                    else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
                        result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
                    }
                    else
                        result = update_flags::retry;
                }
                else {
                    // the tree is empty
                    if ( nFlags & update_flags::allow_insert ) {
                        // insert into the tree as the right child of the root
                        node_scoped_lock l( m_Monitor, *m_pRoot );
                        if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
                            result = update_flags::retry;
                            continue;
                        }

                        node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
                        mapped_type pVal = funcUpdate( pNew );
                        assert( pVal != nullptr );
                        pNew->m_pValue.store( pVal, memory_model::memory_order_release );

                        m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
                        set_height( m_pRoot, 2 );

                        ++m_ItemCounter;
                        m_stat.onInsertSuccess();
                        return update_flags::result_inserted;
                    }

                    return update_flags::failed;
                }
            } while ( result == update_flags::retry );

            return result;
        }

        template <typename K, typename Compare, typename Func>
        bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
        {
            assert( gc::is_locked() );

            int result;
            do {
                // get the right child of root
                node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
                if ( pChild ) {
                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onRemoveRootWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        result = update_flags::retry;
                    }
                    else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
                        result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
                    }
                    else
                        result = update_flags::retry;
                }
                else
                    return false;   // the tree is empty
            } while ( result == update_flags::retry );

            return result == update_flags::result_removed;
        }
        template <typename K, typename Compare, typename Func>
        int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
        {
            assert( gc::is_locked() );
            assert( nVersion != node_type::unlinked );
            CDS_UNUSED( pParent );

            int nCmp = cmp( key, pNode->m_key );
            if ( nCmp == 0 ) {
                if ( nFlags & update_flags::allow_update )
                    return try_update_node( funcUpdate, pNode, disp );
                return update_flags::failed;
            }

            int result;
            do {
                node_type * pChild = child( pNode, nCmp );
                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onUpdateRetry();
                    return update_flags::retry;
                }

                if ( pChild == nullptr ) {
                    // insert a new node
                    if ( nFlags & update_flags::allow_insert )
                        result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
                    else
                        result = update_flags::failed;
                }
                else {
                    // update or insert in the subtree
                    result = update_flags::retry;
                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onUpdateWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        // retry
                    }
                    else if ( pChild == child( pNode, nCmp )) {
                        // this second read is important, because it is protected by nChildVersion

                        // validate the read that our caller took to get to node
                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                            m_stat.onUpdateRetry();
                            return update_flags::retry;
                        }

                        // At this point we know that the traversal our parent took to get to node is still valid.
                        // The recursive implementation will validate the traversal from node to
                        // child, so just prior to the node nVersion validation both traversals were definitely okay.
                        // This means that we are no longer vulnerable to node shrinks, and we don't need
                        // to validate node version any more.
                        result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
                    }
                }

                if ( result == update_flags::retry && pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onUpdateRetry();
                    return update_flags::retry;
                }
            } while ( result == update_flags::retry );

            return result;
        }
        template <typename K, typename Compare, typename Func>
        int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
        {
            assert( gc::is_locked() );
            assert( nVersion != node_type::unlinked );

            int nCmp = cmp( key, pNode->m_key );
            if ( nCmp == 0 )
                return try_remove_node( pParent, pNode, nVersion, func, disp );

            int result;
            do {
                node_type * pChild = child( pNode, nCmp );
                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onRemoveRetry();
                    return update_flags::retry;
                }

                if ( pChild == nullptr ) {
                    return update_flags::failed;
                }
                else {
                    // remove from the subtree
                    result = update_flags::retry;
                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onRemoveWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        // retry
                    }
                    else if ( pChild == child( pNode, nCmp )) {
                        // this second read is important, because it is protected by nChildVersion

                        // validate the read that our caller took to get to node
                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                            m_stat.onRemoveRetry();
                            return update_flags::retry;
                        }

                        // At this point we know that the traversal our parent took to get to node is still valid.
                        // The recursive implementation will validate the traversal from node to
                        // child, so just prior to the node nVersion validation both traversals were definitely okay.
                        // This means that we are no longer vulnerable to node shrinks, and we don't need
                        // to validate node version any more.
                        result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
                    }
                }

                if ( result == update_flags::retry && pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onRemoveRetry();
                    return update_flags::retry;
                }
            } while ( result == update_flags::retry );

            return result;
        }
        template <typename Func>
        int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
        {
            assert( gc::is_locked() );
            assert( nVersion != node_type::unlinked );

            int result;
            do {
                node_type * pChild = child( pNode, nDir );
                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onRemoveRetry();
                    return update_flags::retry;
                }

                if ( pChild == nullptr ) {
                    // found the min/max node
                    return try_remove_node( pParent, pNode, nVersion, func, disp );
                }
                else {
                    result = update_flags::retry;
                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onRemoveWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        // retry
                    }
                    else if ( pChild == child( pNode, nDir )) {
                        // this second read is important, because it is protected by nChildVersion

                        // validate the read that our caller took to get to node
                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                            m_stat.onRemoveRetry();
                            return update_flags::retry;
                        }

                        // At this point we know that the traversal our parent took to get to node is still valid.
                        // The recursive implementation will validate the traversal from node to
                        // child, so just prior to the node nVersion validation both traversals were definitely okay.
                        // This means that we are no longer vulnerable to node shrinks, and we don't need
                        // to validate node version any more.
                        result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
                    }
                }

                if ( result == update_flags::retry && pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                    m_stat.onRemoveRetry();
                    return update_flags::retry;
                }
            } while ( result == update_flags::retry );

            return result;
        }
        template <typename K, typename Func>
        int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
        {
            node_type * pNew;

            auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
                mapped_type pVal = funcUpdate( pNew );
                assert( pVal != nullptr );
                pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
            };

            if ( c_bRelaxedInsert ) {
                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
                     || child( pNode, nDir ) != nullptr )
                {
                    m_stat.onInsertRetry();
                    return update_flags::retry;
                }

                fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
            }

            node_type * pDamaged;
            {
                assert( pNode != nullptr );
                node_scoped_lock l( m_Monitor, *pNode );

                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
                     || child( pNode, nDir ) != nullptr )
                {
                    if ( c_bRelaxedInsert ) {
                        mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
                        pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
                        free_value( pVal );
                        free_node( pNew );
                        m_stat.onRelaxedInsertFailed();
                    }

                    m_stat.onInsertRetry();
                    return update_flags::retry;
                }

                if ( !c_bRelaxedInsert )
                    fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));

                pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
                pDamaged = fix_height_locked( pNode );
            }

            ++m_ItemCounter;
            m_stat.onInsertSuccess();

            if ( pDamaged ) {
                fix_height_and_rebalance( pDamaged, disp );
                m_stat.onInsertRebalanceRequired();
            }

            return update_flags::result_inserted;
        }
        template <typename Func>
        int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
        {
            mapped_type pOld;
            assert( pNode != nullptr );
            {
                node_scoped_lock l( m_Monitor, *pNode );

                if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
                    m_stat.onUpdateUnlinked();
                    return update_flags::retry;
                }

                pOld = pNode->value( memory_model::memory_order_relaxed );
                mapped_type pVal = funcUpdate( pNode );
                if ( pVal == pOld )
                    pOld = nullptr;
                else {
                    assert( pVal != nullptr );
                    pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
                }
            }

            if ( pOld ) {
                disp.dispose_value( pOld );
                m_stat.onDisposeValue();
            }

            m_stat.onUpdateSuccess();
            return update_flags::result_updated;
        }
        template <typename Func>
        int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
        {
            assert( pParent != nullptr );
            assert( pNode != nullptr );

            if ( !pNode->is_valued( atomics::memory_order_relaxed ))
                return update_flags::failed;

            if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
                // pNode can be unlinked and replaced by its single child
                node_type * pDamaged;
                mapped_type pOld;
                {
                    node_scoped_lock lp( m_Monitor, *pParent );
                    if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode ) != pParent )
                        return update_flags::retry;

                    {
                        node_scoped_lock ln( m_Monitor, *pNode );
                        pOld = pNode->value( memory_model::memory_order_relaxed );
                        if ( !( pNode->version( memory_model::memory_order_acquire ) == nVersion
                                && pOld
                                && try_unlink_locked( pParent, pNode, disp )))
                        {
                            return update_flags::retry;
                        }
                    }
                    pDamaged = fix_height_locked( pParent );
                }

                --m_ItemCounter;
                if ( func( pNode->m_key, pOld, disp ))  // calls pOld disposer inside
                    m_stat.onDisposeValue();
                else
                    m_stat.onExtractValue();

                if ( pDamaged ) {
                    fix_height_and_rebalance( pDamaged, disp );
                    m_stat.onRemoveRebalanceRequired();
                }
                return update_flags::result_removed;
            }
            else {
                // pNode has two children: turn it into a routing node by clearing its value
                int result = update_flags::retry;
                mapped_type pOld;
                {
                    node_scoped_lock ln( m_Monitor, *pNode );
                    pOld = pNode->value( atomics::memory_order_relaxed );
                    if ( pNode->version( atomics::memory_order_acquire ) == nVersion && pOld ) {
                        pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
                        result = update_flags::result_removed;
                    }
                }

                if ( result == update_flags::result_removed ) {
                    --m_ItemCounter;
                    if ( func( pNode->m_key, pOld, disp ))  // calls pOld disposer inside
                        m_stat.onDisposeValue();
                    else
                        m_stat.onExtractValue();
                }

                return result;
            }
        }
        bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
        {
            // pParent and pNode must be locked
            assert( !pParent->is_unlinked( memory_model::memory_order_relaxed ));

            node_type * pParentLeft = child( pParent, left_child );
            node_type * pParentRight = child( pParent, right_child );
            if ( pNode != pParentLeft && pNode != pParentRight ) {
                // node is no longer a child of parent
                return false;
            }

            assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
            assert( pParent == parent( pNode ));

            node_type * pLeft = child( pNode, left_child );
            node_type * pRight = child( pNode, right_child );
            if ( pLeft != nullptr && pRight != nullptr ) {
                // splicing is no longer possible
                return false;
            }
            node_type * pSplice = pLeft ? pLeft : pRight;

            if ( pParentLeft == pNode )
                pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
            else
                pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );

            if ( pSplice )
                pSplice->m_pParent.store( pParent, memory_model::memory_order_release );

            // Mark the node as unlinked
            pNode->version( node_type::unlinked, memory_model::memory_order_release );

            // The value will be disposed by the calling function
            pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );

            disp.dispose( pNode );
            m_stat.onDisposeNode();

            return true;
        }
    private: // rotations
        int estimate_node_condition( node_type * pNode )
        {
            node_type * pLeft = child( pNode, left_child );
            node_type * pRight = child( pNode, right_child );

            if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
                return unlink_required;

            int h = height( pNode );
            int hL = height_null( pLeft );
            int hR = height_null( pRight );

            int hNew = 1 + std::max( hL, hR );
            int nBalance = hL - hR;

            if ( nBalance < -1 || nBalance > 1 )
                return rebalance_required;

            return h != hNew ? hNew : nothing_required;
        }

        node_type * fix_height( node_type * pNode )
        {
            assert( pNode != nullptr );
            node_scoped_lock l( m_Monitor, *pNode );
            return fix_height_locked( pNode );
        }

        node_type * fix_height_locked( node_type * pNode )
        {
            // pNode must be locked!!!
            int h = estimate_node_condition( pNode );
            switch ( h ) {
                case rebalance_required:
                case unlink_required:
                    return pNode;
                case nothing_required:
                    return nullptr;
                default:
                    set_height( pNode, h );
                    return parent( pNode );
            }
        }
        void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
        {
            while ( pNode && parent( pNode )) {
                int nCond = estimate_node_condition( pNode );
                if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ))
                    return;

                if ( nCond != unlink_required && nCond != rebalance_required )
                    pNode = fix_height( pNode );
                else {
                    node_type * pParent = parent( pNode );
                    assert( pParent != nullptr );
                    {
                        node_scoped_lock lp( m_Monitor, *pParent );
                        if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode ) == pParent ) {
                            node_scoped_lock ln( m_Monitor, *pNode );
                            pNode = rebalance_locked( pParent, pNode, disp );
                        }
                    }
                }
            }
        }
        node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
        {
            // pParent and pNode should be locked.
            // Returns a damaged node, or nullptr if no more rebalancing is necessary
            assert( parent( pNode ) == pParent );

            node_type * pLeft = child( pNode, left_child );
            node_type * pRight = child( pNode, right_child );

            if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
                if ( try_unlink_locked( pParent, pNode, disp ))
                    return fix_height_locked( pParent );
                else {
                    // retry needed for pNode
                    return pNode;
                }
            }

            assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );

            int h = height( pNode );
            int hL = height_null( pLeft );
            int hR = height_null( pRight );
            int hNew = 1 + std::max( hL, hR );
            int balance = hL - hR;

            if ( balance > 1 )
                return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
            else if ( balance < -1 )
                return rebalance_to_left_locked( pParent, pNode, pRight, hL );
            else if ( hNew != h ) {
                set_height( pNode, hNew );

                // pParent is already locked
                return fix_height_locked( pParent );
            }
            return nullptr;
        }
        node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
        {
            assert( parent( pNode ) == pParent );
            assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );

            // pParent and pNode are already locked.
            // pNode->pLeft is too large, we will rotate-right.
            // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.

            {
                assert( pLeft != nullptr );
                node_scoped_lock l( m_Monitor, *pLeft );
                if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
                    return pNode; // retry for pNode

                int hL = height( pLeft );
                if ( hL - hR <= 1 )
                    return pNode; // retry

                node_type * pLRight = child( pLeft, right_child );
                int hLR = height_null( pLRight );
                node_type * pLLeft = child( pLeft, left_child );
                int hLL = height_null( pLLeft );

                if ( hLL > hLR ) {
                    // rotate right
                    return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
                }
                else {
                    assert( pLRight != nullptr );
                    {
                        node_scoped_lock lr( m_Monitor, *pLRight );
                        if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
                            return pNode; // retry

                        hLR = height( pLRight );
                        if ( hLL > hLR )
                            return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );

                        int hLRL = height_null( child( pLRight, left_child ));
                        int balance = hLL - hLRL;
                        if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued( memory_model::memory_order_relaxed ))) {
                            // nParent.child.left won't be damaged after a double rotation
                            return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
                        }
                    }

                    // focus on pLeft, if necessary pNode will be balanced later
                    return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
                }
            }
        }
        node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
        {
            assert( parent( pNode ) == pParent );
            assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );

            // pParent and pNode are already locked

            {
                assert( pRight != nullptr );
                node_scoped_lock l( m_Monitor, *pRight );
                if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
                    return pNode; // retry for pNode

                int hR = height( pRight );
                if ( hL - hR >= -1 )
                    return pNode; // retry

                node_type * pRLeft = child( pRight, left_child );
                int hRL = height_null( pRLeft );
                node_type * pRRight = child( pRight, right_child );
                int hRR = height_null( pRRight );

                if ( hRR > hRL )
                    return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );

                {
                    assert( pRLeft != nullptr );
                    node_scoped_lock lrl( m_Monitor, *pRLeft );
                    if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
                        return pNode; // retry

                    hRL = height( pRLeft );
                    if ( hRR >= hRL )
                        return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );

                    node_type * pRLRight = child( pRLeft, right_child );
                    int hRLR = height_null( pRLRight );
                    int balance = hRR - hRLR;
                    if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
                        return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
                }

                // focus on pRight, if necessary pNode will be balanced later
                return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
            }
        }
        static void begin_change( node_type * pNode, version_type version )
        {
            assert( pNode->version( memory_model::memory_order_acquire ) == version );
            assert( (version & node_type::shrinking) == 0 );
            pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
        }
        static void end_change( node_type * pNode, version_type version )
        {
            // Clear shrinking and unlinked flags and increment version
            pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
        }
        node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
            node_type * pParentLeft = child( pParent, left_child );

            begin_change( pNode, nodeVersion );

            // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
            pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
            if ( pLRight != nullptr )
                pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );

            if ( pParentLeft == pNode )
                pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
            }
            pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hLR, hR );
            set_height( pNode, hNode );
            set_height( pLeft, 1 + std::max( hLL, hNode ));

            end_change( pNode, nodeVersion );
            m_stat.onRotateRight();

            // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
            // parent.child). pNode is the deepest. Perform as many fixes as we can
            // with the locks we've got.

            // We've already fixed the height for pNode, but it might still be outside
            // our allowable balance range. In that case a simple fix_height_locked()
            // won't help.
            int nodeBalance = hLR - hR;
            if ( nodeBalance < -1 || nodeBalance > 1 ) {
                // we need another rotation at pNode
                return pNode;
            }

            // we've fixed balance and height damage for pNode, now handle
            // extra-routing node damage
            if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
                // we need to remove pNode and then repair
                return pNode;
            }

            // we've already fixed the height at pLeft, do we need a rotation here?
            int leftBalance = hLL - hNode;
            if ( leftBalance < -1 || leftBalance > 1 )
                return pLeft;

            // pLeft might also have routing node damage (if pLeft.left was null)
            if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
                return pLeft;

            // try to fix the parent height while we've still got the lock
            return fix_height_locked( pParent );
        }
        node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
            node_type * pParentLeft = child( pParent, left_child );

            begin_change( pNode, nodeVersion );

            // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
            pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
            if ( pRLeft != nullptr )
                pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );

            if ( pParentLeft == pNode )
                pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
            }
            pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hL, hRL );
            set_height( pNode, hNode );
            set_height( pRight, 1 + std::max( hNode, hRR ));

            end_change( pNode, nodeVersion );
            m_stat.onRotateLeft();

            int nodeBalance = hRL - hL;
            if ( nodeBalance < -1 || nodeBalance > 1 )
                return pNode;

            if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
                return pNode;

            int rightBalance = hRR - hNode;
            if ( rightBalance < -1 || rightBalance > 1 )
                return pRight;

            if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
                return pRight;

            return fix_height_locked( pParent );
        }
        node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
            version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );

            node_type * pPL = child( pParent, left_child );
            node_type * pLRL = child( pLRight, left_child );
            node_type * pLRR = child( pLRight, right_child );
            int hLRR = height_null( pLRR );

            begin_change( pNode, nodeVersion );
            begin_change( pLeft, leftVersion );

            // fix up pNode links, careful about the order!
            pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
            if ( pLRR != nullptr )
                pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
            if ( pLRL != nullptr )
                pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );

            pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
            pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
            pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );

            if ( pPL == pNode )
                pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
            else {
                assert( child( pParent, right_child ) == pNode );
                pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
            }
            pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hLRR, hR );
            set_height( pNode, hNode );
            int hLeft = 1 + std::max( hLL, hLRL );
            set_height( pLeft, hLeft );
            set_height( pLRight, 1 + std::max( hLeft, hNode ));

            end_change( pNode, nodeVersion );
            end_change( pLeft, leftVersion );
            m_stat.onRotateRightOverLeft();

            // caller should have performed only a single rotation if pLeft was going
            // to end up damaged
            assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
            assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));

            // We have damaged pParent, pLR (now parent.child), and pNode (now
            // parent.child.right). pNode is the deepest. Perform as many fixes as we
            // can with the locks we've got.

            // We've already fixed the height for pNode, but it might still be outside
            // our allowable balance range. In that case a simple fix_height_locked()
            // won't help.
            int nodeBalance = hLRR - hR;
            if ( nodeBalance < -1 || nodeBalance > 1 ) {
                // we need another rotation at pNode
                return pNode;
            }

            // pNode might also be damaged by being an unnecessary routing node
            if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
                // repair involves splicing out pNode and maybe more rotations
                return pNode;
            }

            // we've already fixed the height at pLRight, do we need a rotation here?
            int balanceLR = hLeft - hNode;
            if ( balanceLR < -1 || balanceLR > 1 )
                return pLRight;

            // try to fix the parent height while we've still got the lock
            return fix_height_locked( pParent );
        }
        node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
            version_type rightVersion = pRight->version( memory_model::memory_order_acquire );

            node_type * pPL = child( pParent, left_child );
            node_type * pRLL = child( pRLeft, left_child );
            node_type * pRLR = child( pRLeft, right_child );
            int hRLL = height_null( pRLL );

            begin_change( pNode, nodeVersion );
            begin_change( pRight, rightVersion );

            // fix up pNode links, careful about the order!
            pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
            if ( pRLL != nullptr )
                pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
            if ( pRLR != nullptr )
                pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );

            pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
            pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
            pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );

            if ( pPL == pNode )
                pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
            }
            pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hL, hRLL );
            set_height( pNode, hNode );
            int hRight = 1 + std::max( hRLR, hRR );
            set_height( pRight, hRight );
            set_height( pRLeft, 1 + std::max( hNode, hRight ));

            end_change( pNode, nodeVersion );
            end_change( pRight, rightVersion );
            m_stat.onRotateLeftOverRight();

            assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );

            int nodeBalance = hRLL - hL;
            if ( nodeBalance < -1 || nodeBalance > 1 )
                return pNode;

            if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
                return pNode;

            int balRL = hRight - hNode;
            if ( balRL < -1 || balRL > 1 )
                return pRLeft;

            return fix_height_locked( pParent );
        }
    };

}} // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H