3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <cds/container/details/bronson_avltree_base.h>
7 #include <cds/urcu/details/check_deadlock.h>
9 namespace cds { namespace container {
11 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
12 /** @ingroup cds_nonintrusive_map
13 @ingroup cds_nonintrusive_tree
14 @headerfile cds/container/bronson_avltree_map_rcu.h
16 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
17 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
the disposer functor provided by the \p Traits template parameter.
21 <b>Template arguments</b>:
22 - \p RCU - one of \ref cds_urcu_gc "RCU type"
- \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to the user-allocated value, not a copy.
26 - \p Traits - tree traits, default is \p bronson_avltree::traits
27 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
28 instead of \p Traits template argument.
@note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include the appropriate RCU header file,
see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
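A minimal usage sketch (the RCU type, key/value types, traits and the disposer below are illustrative only;
libcds initialization and thread attaching are omitted):
\code
#include <string>
#include <cds/urcu/general_buffered.h>   // the RCU header must be included first
#include <cds/container/bronson_avltree_map_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

// Disposer for the user-allocated values stored in the map
struct value_disposer {
    void operator()( std::string * p ) const { delete p; }
};

struct map_traits: public cds::container::bronson_avltree::traits
{
    typedef value_disposer disposer;
};

// "int -> std::string*" map
typedef cds::container::BronsonAVLTreeMap< rcu_type, int, std::string *, map_traits > map_type;

map_type theMap;
theMap.insert( 1, new std::string( "one" ));    // the map stores the pointer, not a copy of the value
\endcode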
37 # ifdef CDS_DOXYGEN_INVOKED
38 typename Traits = bronson_avltree::traits
43 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
46 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
47 typedef Key key_type; ///< type of a key stored in the map
48 typedef T * mapped_type; ///< type of value stored in the map
49 typedef Traits traits; ///< Traits template parameter
51 # ifdef CDS_DOXYGEN_INVOKED
52 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
54 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
56 typedef typename traits::item_counter item_counter; ///< Item counting policy
57 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
58 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
59 typedef typename traits::stat stat; ///< internal statistics
60 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
61 typedef typename traits::back_off back_off; ///< Back-off strategy
62 typedef typename traits::disposer disposer; ///< Value disposer
63 typedef typename traits::lock_type lock_type; ///< Node lock type
/// Whether @ref bronson_avltree::relaxed_insert "relaxed insertion" is enabled
66 static bool const c_bRelaxedInsert = traits::relaxed_insert;
70 typedef bronson_avltree::node< key_type, mapped_type, lock_type > node_type;
71 typedef typename node_type::version_type version_type;
73 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
74 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
76 enum class find_result
enum update_flags
90 result_insert = allow_insert,
91 result_update = allow_update
96 nothing_required = -3,
97 rebalance_required = -2,
101 class node_scoped_lock
105 node_scoped_lock( node_type * pNode )
108 pNode->m_Lock.lock();
113 m_pNode->m_Lock.unlock();
121 template <typename K>
122 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
124 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
127 static void free_node( node_type * pNode )
129 // Free node without disposer
130 cxx_allocator().Delete( pNode );
136 node_type * m_pRetiredList; ///< head of retired list
140 : m_pRetiredList( nullptr )
148 void dispose( node_type * pNode )
150 mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
152 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
156 pNode->m_pNextRemoved = m_pRetiredList;
157 m_pRetiredList = pNode;
161 struct internal_disposer
163 void operator()( node_type * p ) const
171 assert( !gc::is_locked() );
173 for ( node_type * p = m_pRetiredList; p; ) {
174 node_type * pNext = p->m_pNextRemoved;
175 // Value already disposed
176 gc::template retire_ptr<internal_disposer>( p );
186 typename node_type::base_class m_Root;
188 item_counter m_ItemCounter;
193 /// Creates empty map
195 : m_pRoot( static_cast<node_type *>(&m_Root) )
206 The \p key_type should be constructible from a value of type \p K.
208 RCU \p synchronize() can be called. RCU should not be locked.
Returns \p true if the insertion is successful, \p false otherwise.
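Example (a sketch; \p map_type and the value type follow the class-level example above):
\code
map_type theMap;
std::string * pVal = new std::string( "ten" );
if ( !theMap.insert( 10, pVal )) {
    // the key is already in the map; pVal has not been stored
    delete pVal;
}
\endcode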
212 template <typename K>
213 bool insert( K const& key, mapped_type * pVal )
215 return do_update(key, key_comparator(),
216 [pVal]( node_type * pNode ) -> mapped_type*
218 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
221 update_flags::allow_insert
222 ) == update_flags::result_insert;
225 /// Updates the value for \p key
The operation performs inserting or updating the value for \p key.
If \p bInsert is \p false, only updating of an existing node is possible.
230 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
231 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
232 constructible from type \p K.
233 Otherwise, the value is changed to \p pVal.
235 RCU \p synchronize() method can be called. RCU should not be locked.
Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
\p second is \p true if a new node has been added or \p false if a node with \p key
already exists in the tree.
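Example (a sketch based on the class-level example above):
\code
std::string * pVal = new std::string( "ten, updated" );
std::pair<bool, bool> res = theMap.update( 10, pVal );
// res.first  - true if the operation succeeded
// res.second - true if a new node has been inserted;
//              false if an existing node was found and its value replaced with pVal
//              (the old value is passed to the disposer)
\endcode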
template <typename K>
242 std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
244 int result = do_update( key, key_comparator(),
245 [pVal]( node_type * ) -> mapped_type*
249 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
251 return std::make_pair( result != 0, (result & update_flags::result_insert) != 0 );
254 /// Delete \p key from the map
256 RCU \p synchronize() method can be called. RCU should not be locked.
Returns \p true if \p key is found and deleted, \p false otherwise
260 template <typename K>
261 bool erase( K const& key )
266 /// Deletes the item from the map using \p pred predicate for searching
268 The function is an analog of \p erase(K const&)
269 but \p pred is used for key comparing.
270 \p Less functor has the interface like \p std::less.
271 \p Less must imply the same element order as the comparator used for building the map.
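A sketch of \p erase_with usage (the \p int_less functor and the key type are illustrative;
the functor must give the same order as the comparator the map was built with):
\code
struct int_less {
    bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
};
theMap.erase_with( 10, int_less());
\endcode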
273 template <typename K, typename Less>
274 bool erase_with( K const& key, Less pred )
280 /// Delete \p key from the map
282 The function searches an item with key \p key, calls \p f functor
283 and deletes the item. If \p key is not found, the functor is not called.
285 The functor \p Func interface:
288 void operator()(mapped_type& item) { ... }
292 RCU \p synchronize method can be called. RCU should not be locked.
Returns \p true if \p key is found and deleted, \p false otherwise
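Example (a sketch; \p map_type follows the class-level example above, <tt><iostream></tt> is assumed to be included):
\code
bool bDeleted = theMap.erase( 10,
    []( map_type::mapped_type& item ) {
        // item is the stored value - for this specialization, the pointer to the user-allocated data;
        // the value itself will be passed to the disposer after the node is removed
        std::cout << *item << std::endl;
    });
\endcode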
296 template <typename K, typename Func>
297 bool erase( K const& key, Func f )
302 /// Deletes the item from the map using \p pred predicate for searching
304 The function is an analog of \p erase(K const&, Func)
305 but \p pred is used for key comparing.
306 \p Less functor has the interface like \p std::less.
307 \p Less must imply the same element order as the comparator used for building the map.
309 template <typename K, typename Less, typename Func>
310 bool erase_with( K const& key, Less pred, Func f )
316 /// Extracts an item with minimal key from the map
318 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
319 If the set is empty, returns empty \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
It means that the function gets the leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
So, the function returns the item with the minimal key at the moment of tree traversal.
326 RCU \p synchronize method can be called. RCU should NOT be locked.
327 The function does not free the item.
328 The deallocator will be implicitly invoked when the returned object is destroyed or when
329 its \p release() member function is called.
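Example (a sketch; \p map_type follows the class-level example above):
\code
// RCU must not be locked here
for ( map_type::exempt_ptr ep = theMap.extract_min(); !ep.empty(); ep = theMap.extract_min() ) {
    // use *ep here ...
    // the disposer is invoked for the value when ep is released or destroyed
}
\endcode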
331 exempt_ptr extract_min()
336 /// Extracts an item with maximal key from the map
338 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
339 If the set is empty, returns empty \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
It means that the function gets the rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
So, the function returns the item with the maximal key at the moment of tree traversal.
346 RCU \p synchronize method can be called. RCU should NOT be locked.
347 The function does not free the item.
348 The deallocator will be implicitly invoked when the returned object is destroyed or when
its \p release() member function is called.
@note Before reusing the returned object you should call its \p release() method.
352 exempt_ptr extract_max()
357 /// Extracts an item from the map
359 The function searches an item with key equal to \p key in the tree,
360 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found.
361 If \p key is not found the function returns an empty \p exempt_ptr.
363 RCU \p synchronize method can be called. RCU should NOT be locked.
364 The function does not destroy the item found.
The deallocator will be implicitly invoked when the returned object is destroyed or when
366 its \p release() member function is called.
368 template <typename Q>
369 exempt_ptr extract( Q const& key )
374 /// Extracts an item from the map using \p pred for searching
The function is an analog of \p extract(Q const&)
377 but \p pred is used for key compare.
378 \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less
379 "predicate requirements".
380 \p pred must imply the same element order as the comparator used for building the map.
382 template <typename Q, typename Less>
383 exempt_ptr extract_with( Q const& val, Less pred )
389 /// Find the key \p key
391 The function searches the item with key equal to \p key and calls the functor \p f for item found.
392 The interface of \p Func functor is:
395 void operator()( mapped_type& item );
398 where \p item is the item found.
399 The functor is called under node-level lock.
401 The function applies RCU lock internally.
403 The function returns \p true if \p key is found, \p false otherwise.
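Example (a sketch; \p map_type follows the class-level example above, <tt><iostream></tt> is assumed to be included):
\code
bool bFound = theMap.find( 10,
    []( map_type::mapped_type& item ) {
        // called under the node-level lock;
        // item is the stored value (the pointer to the user-allocated data)
        std::cout << *item << std::endl;
    });
\endcode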
405 template <typename K, typename Func>
406 bool find( K const& key, Func f )
408 return do_find( key, key_comparator(), [&f]( node_type * pNode ) -> bool {
409 node_scoped_lock l( pNode );
410 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
/// Finds the key \p key using \p pred predicate for searching
421 The function is an analog of \p find(K const&, Func)
422 but \p pred is used for key comparing.
423 \p Less functor has the interface like \p std::less.
424 \p Less must imply the same element order as the comparator used for building the map.
426 template <typename K, typename Less, typename Func>
427 bool find_with( K const& key, Less pred, Func f )
430 return do_find( key, opt::details::make_comparator_from_less<Less>(), [&f]( node_type * pNode ) -> bool {
431 node_scoped_lock l( pNode );
432 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
441 /// Find the key \p key
443 The function searches the item with key equal to \p key
444 and returns \p true if it is found, and \p false otherwise.
446 The function applies RCU lock internally.
448 template <typename K>
449 bool find( K const& key )
451 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
/// Finds the key \p key using \p pred predicate for searching
456 The function is an analog of \p find(K const&)
457 but \p pred is used for key comparing.
458 \p Less functor has the interface like \p std::less.
459 \p Less must imply the same element order as the comparator used for building the map.
461 template <typename K, typename Less>
462 bool find_with( K const& key, Less pred )
465 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
468 /// Finds \p key and return the item found
470 The function searches the item with key equal to \p key and returns the pointer to item found.
471 If \p key is not found it returns \p nullptr.
RCU should be locked before calling the function.
The returned pointer is valid only while RCU is locked.
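Example (a sketch; \p rcu_type and \p map_type follow the class-level example above):
\code
{
    rcu_type::scoped_lock rcuGuard;     // lock RCU before calling get()
    std::string * p = theMap.get( 10 );
    if ( p ) {
        // p is valid only while RCU is locked
    }
}   // RCU is unlocked here; p must not be used any more
\endcode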
476 template <typename Q>
477 mapped_type * get( Q const& key ) const
482 /// Finds \p key with \p pred predicate and return the item found
484 The function is an analog of \p get(Q const&)
485 but \p pred is used for comparing the keys.
487 \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
488 and \p Q in any order.
489 \p pred must imply the same element order as the comparator used for building the map.
491 template <typename Q, typename Less>
492 mapped_type * get_with( Q const& key, Less pred ) const
498 /// Clears the tree (thread safe, not atomic)
The function unlinks all items from the tree.
The function is thread safe but not atomic: in a multi-threaded environment with parallel insertions
505 assert( set.empty() );
507 the assertion could be raised.
509 For each node the \ref disposer will be called after unlinking.
511 RCU \p synchronize method can be called. RCU should not be locked.
515 for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
519 /// Clears the tree (not thread safe)
521 This function is not thread safe and may be called only when no other thread deals with the tree.
522 The function is used in the tree destructor.
529 /// Checks if the map is empty
532 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
535 /// Returns item count in the map
537 Only leaf nodes containing user data are counted.
539 The value returned depends on item counter type provided by \p Traits template parameter.
540 If it is \p atomicity::empty_item_counter this function always returns 0.
The function is not suitable for checking whether the tree is empty; use the \p empty()
member function for this purpose.
547 return m_ItemCounter;
550 /// Returns const reference to internal statistics
551 stat const& statistics() const
556 /// Checks internal consistency (not atomic, not thread-safe)
558 The debugging function to check internal consistency of the tree.
560 bool check_consistency() const
567 template <typename Q, typename Compare, typename Func>
568 bool do_find( Q& key, Compare cmp, Func f ) const
573 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
575 assert( result != find_result::retry );
576 return result == find_result::found;
579 template <typename K, typename Compare, typename Func>
580 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
582 check_deadlock_policy::check();
584 rcu_disposer removed_list;
return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
595 template <typename Q, typename Compare, typename Func>
596 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
598 assert( gc::is_locked() );
602 node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
604 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
605 m_stat.onFindRetry();
606 return find_result::retry;
609 m_stat.onFindFailed();
610 return find_result::not_found;
613 int nCmp = cmp( key, pChild->m_key );
615 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
618 m_stat.onFindSuccess();
619 return find_result::found;
623 m_stat.onFindFailed();
624 return find_result::not_found;
627 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
628 if ( nChildVersion & node_type::shrinking ) {
629 m_stat.onFindWaitShrinking();
630 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
632 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
633 m_stat.onFindRetry();
634 return find_result::retry;
637 else if ( nChildVersion != node_type::unlinked ) {
639 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
640 m_stat.onFindRetry();
641 return find_result::retry;
644 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
645 if ( found != find_result::retry )
651 template <typename K, typename Compare, typename Func>
int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
654 assert( gc::is_locked() );
658 node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
660 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
661 if ( nChildVersion & node_type::shrinking ) {
662 m_stat.onUpdateRootWaitShrinking();
663 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
666 else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
667 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
672 if ( nFlags & update_flags::allow_insert ) {
673 // insert into tree as right child of the root
675 node_scoped_lock l( m_pRoot );
677 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
678 mapped_type * pVal = funcUpdate( pNew );
679 assert( pVal != nullptr );
680 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
682 m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
683 m_pRoot->height( 2, memory_model::memory_order_relaxed );
687 m_stat.onInsertSuccess();
688 return update_flags::result_insert;
691 return update_flags::failed;
} while ( result == update_flags::retry );
697 template <typename K, typename Compare, typename Func>
698 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
700 assert( gc::is_locked() );
701 assert( nVersion != node_type::unlinked );
703 int nCmp = cmp( key, pNode->m_key );
705 if ( nFlags & update_flags::allow_update ) {
706 return try_update_node( funcUpdate, pNode );
708 return update_flags::failed;
713 node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
714 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
715 return update_flags::retry;
717 if ( pChild == nullptr ) {
719 if ( nFlags & update_flags::allow_insert )
720 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
722 result = update_flags::failed;
726 result = update_flags::retry;
727 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
728 if ( nChildVersion & node_type::shrinking ) {
729 m_stat.onUpdateWaitShrinking();
730 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
733 else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
734 // this second read is important, because it is protected by nChildVersion
736 // validate the read that our caller took to get to node
737 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
738 m_stat.onUpdateRetry();
739 return update_flags::retry;
742 // At this point we know that the traversal our parent took to get to node is still valid.
743 // The recursive implementation will validate the traversal from node to
744 // child, so just prior to the node nVersion validation both traversals were definitely okay.
745 // This means that we are no longer vulnerable to node shrinks, and we don't need
746 // to validate node version any more.
747 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
750 } while ( result == update_flags::retry );
754 template <typename K, typename Func>
755 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
759 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
760 mapped_type * pVal = funcUpdate( pNew );
761 assert( pVal != nullptr );
762 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
765 if ( c_bRelaxedInsert ) {
766 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
767 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
769 m_stat.onInsertRetry();
770 return update_flags::retry;
773 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
776 node_type * pDamaged;
778 node_scoped_lock l( pNode );
780 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
781 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
if ( c_bRelaxedInsert ) {
785 m_stat.onRelaxedInsertFailed();
788 m_stat.onInsertRetry();
789 return update_flags::retry;
792 if ( !c_bRelaxedInsert )
793 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
795 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
796 pDamaged = fix_height_locked( pNode );
798 m_stat.onInsertSuccess();
800 fix_height_and_rebalance( pDamaged, disp );
802 return update_flags::result_insert;
805 template <typename Func>
806 int try_update_node( Func funcUpdate, node_type * pNode )
810 node_scoped_lock l( pNode );
812 if ( pNode->version( memory_model::memory_order_relaxed ) == node_type::unlinked ) {
if ( c_bRelaxedInsert )
815 m_stat.onUpdateUnlinked();
816 return update_flags::retry;
819 pOld = pNode->value( memory_model::memory_order_relaxed );
820 mapped_type * pVal = funcUpdate( pNode );
821 assert( pVal != nullptr );
pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
827 m_stat.onDisposeValue();
830 m_stat.onUpdateSuccess();
831 return update_flags::result_update;
834 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
836 // pParent and pNode must be locked
837 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
839 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
840 node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
841 if ( pNode != pParentLeft && pNode != pParentRight ) {
842 // node is no longer a child of parent
846 assert( !pNode->is_unlinked());
847 assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
849 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
850 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
851 if ( pLeft != nullptr && pRight != nullptr ) {
852 // splicing is no longer possible
855 node_type * pSplice = pLeft ? pLeft : pRight;
857 if ( pParentLeft == pNode )
858 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
860 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
865 // Mark the node as unlinked
866 pNode->version( node_type::unlinked, memory_model::memory_order_release );
867 disp.dispose( pNode );
873 private: // rotations
875 int estimate_node_condition( node_type * pNode )
877 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
878 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
880 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
881 return unlink_required;
883 int h = pNode->height( memory_model::memory_order_relaxed );
884 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
885 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
887 int hNew = 1 + std::max( hL, hR );
888 int nBalance = hL - hR;
890 if ( nBalance < -1 || nBalance > 1 )
891 return rebalance_required;
893 return h != hNew ? hNew : nothing_required;
896 node_type * fix_height( node_type * pNode )
898 node_scoped_lock l( pNode );
899 return fix_height_locked( pNode );
902 node_type * fix_height_locked( node_type * pNode )
904 // pNode must be locked!!!
905 int h = estimate_node_condition( pNode );
907 case rebalance_required:
908 case unlink_required:
910 case nothing_required:
913 pNode->height( h, memory_model::memory_order_relaxed );
914 return pNode->m_pParent.load( memory_model::memory_order_relaxed );
918 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
920 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
921 int nCond = estimate_node_condition( pNode );
922 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
925 if ( nCond != unlink_required && nCond != rebalance_required )
926 pNode = fix_height( pNode );
node_type * pParent = pNode->m_pParent.load( memory_model::memory_order_relaxed );
929 assert( pParent != nullptr );
931 node_scoped_lock lp( pParent );
932 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
933 && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
935 node_scoped_lock ln( pNode );
936 pNode = rebalance_locked( pParent, pNode, disp );
943 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
// pParent and pNode should be locked.
946 // Returns a damaged node, or nullptr if no more rebalancing is necessary
assert( pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent );
assert( pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
|| pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
951 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
952 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
954 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
955 if ( try_unlink_locked( pParent, pNode, disp ))
956 return fix_height_locked( pParent );
958 // retry needed for pNode
963 int h = pNode->height( memory_model::memory_order_relaxed );
964 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
965 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
966 int hNew = 1 + std::max( hL, hR );
967 int balance = hL - hR;
970 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
971 else if ( balance < -1 )
972 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
973 else if ( hNew != h ) {
974 pNode->height( hNew, memory_model::memory_order_relaxed );
976 // pParent is already locked
977 return fix_height_locked( pParent );
983 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
assert( pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent );
assert( pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
|| pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are already locked
990 // pNode->pLeft is too large, we will rotate-right.
991 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
994 node_scoped_lock l( pLeft );
995 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
996 return pNode; // retry for pNode
998 int hL = pLeft->height( memory_model::memory_order_relaxed );
1000 return pNode; // retry
1002 node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
1003 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1004 node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1005 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1009 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1012 assert( pLRight != nullptr );
1014 node_scoped_lock lr( pLRight );
1015 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1016 return pNode; // retry
1018 hLR = pLRight->height( memory_model::memory_order_relaxed );
1020 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1022 node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1023 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1024 int balance = hLL - hLRL;
1025 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
1026 // nParent.child.left won't be damaged after a double rotation
1027 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1031 // focus on pLeft, if necessary pNode will be balanced later
1032 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1037 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
assert( pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent );
assert( pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
|| pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are already locked
1045 node_scoped_lock l( pRight );
1046 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1047 return pNode; // retry for pNode
1049 int hR = pRight->height( memory_model::memory_order_relaxed );
1050 if ( hL - hR >= -1 )
1051 return pNode; // retry
1053 node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1055 node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
1056 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1058 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1061 assert( pRLeft != nullptr );
1062 node_scoped_lock lrl( pRLeft );
1063 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1064 return pNode; // retry
1066 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1068 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1070 node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1071 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1072 int balance = hRR - hRLR;
if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_model::memory_order_relaxed ) == nullptr))
1074 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1076 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1080 static void begin_change( node_type * pNode, version_type version )
1082 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1084 static void end_change( node_type * pNode, version_type version )
1086 // Clear shrinking and unlinked flags and increment version
1087 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1090 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1092 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1093 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1095 begin_change( pNode, nodeVersion );
1097 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1098 if ( pLRight != nullptr )
1099 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1101 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1102 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1104 if ( pParentLeft == pNode )
1105 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1110 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1112 // fix up heights links
1113 int hNode = 1 + std::max( hLR, hR );
1114 pNode->height( hNode, memory_model::memory_order_relaxed );
1115 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1117 end_change( pNode, nodeVersion );
1119 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1120 // parent.child). pNode is the deepest. Perform as many fixes as we can
1121 // with the locks we've got.
1123 // We've already fixed the height for pNode, but it might still be outside
1124 // our allowable balance range. In that case a simple fix_height_locked()
1126 int nodeBalance = hLR - hR;
1127 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1128 // we need another rotation at pNode
1132 // we've fixed balance and height damage for pNode, now handle
1133 // extra-routing node damage
1134 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
1135 // we need to remove pNode and then repair
1139 // we've already fixed the height at pLeft, do we need a rotation here?
1140 int leftBalance = hLL - hNode;
1141 if ( leftBalance < -1 || leftBalance > 1 )
1144 // pLeft might also have routing node damage (if pLeft.left was null)
1145 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1148 // try to fix the parent height while we've still got the lock
1149 return fix_height_locked( pParent );
1152 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1154 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1157 begin_change( pNode, nodeVersion );
1159 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1161 if ( pRLeft != nullptr )
1162 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1164 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1165 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1167 if ( pParentLeft == pNode )
1168 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1173 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1176 int hNode = 1 + std::max( hL, hRL );
1177 pNode->height( hNode, memory_model::memory_order_relaxed );
1178 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1180 end_change( pNode, nodeVersion );
1182 int nodeBalance = hRL - hL;
1183 if ( nodeBalance < -1 || nodeBalance > 1 )
1186 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1189 int rightBalance = hRR - hNode;
1190 if ( rightBalance < -1 || rightBalance > 1 )
1193 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
1196 return fix_height_locked( pParent );
1199 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1204 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1205 node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1206 node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1209 begin_change( pNode, nodeVersion );
1210 begin_change( pLeft, leftVersion );
1212 // fix up pNode links, careful about the order!
1213 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1214 if ( pLRR != nullptr )
1215 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1217 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1218 if ( pLRL != nullptr )
1219 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1221 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1222 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1223 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1224 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1227 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1230 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1232 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1235 int hNode = 1 + std::max( hLRR, hR );
1236 pNode->height( hNode, memory_model::memory_order_relaxed );
1237 int hLeft = 1 + std::max( hLL, hLRL );
1238 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1239 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1241 end_change( pNode, nodeVersion );
1242 end_change( pLeft, leftVersion );
1244 // caller should have performed only a single rotation if pLeft was going
1245 // to end up damaged
1246 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1247 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1249 // We have damaged pParent, pLR (now parent.child), and pNode (now
1250 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1251 // can with the locks we've got.
1253 // We've already fixed the height for pNode, but it might still be outside
1254 // our allowable balance range. In that case a simple fix_height_locked()
1256 int nodeBalance = hLRR - hR;
1257 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1258 // we need another rotation at pNode
1262 // pNode might also be damaged by being an unnecessary routing node
1263 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1264 // repair involves splicing out pNode and maybe more rotations
1268 // we've already fixed the height at pLRight, do we need a rotation here?
int balanceLR = hLeft - hNode;
1270 if ( balanceLR < -1 || balanceLR > 1 )
1273 // try to fix the parent height while we've still got the lock
1274 return fix_height_locked( pParent );
1277 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1279 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1280 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1282 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1283 node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1284 node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1285 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1287 begin_change( pNode, nodeVersion );
begin_change( pRight, rightVersion );
1290 // fix up pNode links, careful about the order!
1291 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1292 if ( pRLL != nullptr )
1293 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1295 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1296 if ( pRLR != nullptr )
1297 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1299 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1300 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1301 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1302 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1305 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1307 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1308 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1310 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1313 int hNode = 1 + std::max( hL, hRLL );
1314 pNode->height( hNode, memory_model::memory_order_relaxed );
1315 int hRight = 1 + std::max( hRLR, hRR );
1316 pRight->height( hRight, memory_model::memory_order_relaxed );
1317 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1319 end_change( pNode, nodeVersion );
1320 end_change( pRight, rightVersion );
1322 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1324 int nodeBalance = hRLL - hL;
1325 if ( nodeBalance < -1 || nodeBalance > 1 )
1327 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1330 int balRL = hRight - hNode;
1331 if ( balRL < -1 || balRL > 1 )
1334 return fix_height_locked( pParent );
1339 }} // namespace cds::container
1341 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H