3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <cds/container/details/bronson_avltree_base.h>
7 #include <cds/urcu/details/check_deadlock.h>
9 namespace cds { namespace container {
11 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
12 /** @ingroup cds_nonintrusive_map
13 @ingroup cds_nonintrusive_tree
14 @headerfile cds/container/bronson_avltree_map_rcu.h
16 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
17 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
18 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
19 the disposer functor provided by \p Traits template parameter.
21 The set of available member functions differs from classic map.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
// --- Public type aliases re-exported from the Traits template parameter ---
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::lock_type lock_type; ///< Node lock type
67 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
68 static bool const c_bRelaxedInsert = traits::relaxed_insert;
// Internal node type: key + value pointer + per-node lock + version word
72 typedef bronson_avltree::node< key_type, mapped_type, lock_type > node_type;
73 typedef typename node_type::version_type version_type;
75 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
76 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// Outcome of one try_find() pass: found / not_found / retry
78 enum class find_result
// Bit flags controlling do_update() and its result codes.
// NOTE(review): several enumerators are missing from this excerpt.
85 enum class update_flags
92 result_insert = allow_insert,
93 result_update = allow_update
// Node-condition codes returned by estimate_node_condition():
98 nothing_required = -3,
99 rebalance_required = -2,
// RAII guard over a node's per-node mutex (m_Lock)
103 class node_scoped_lock
107 node_scoped_lock( node_type * pNode )
110 pNode->m_Lock.lock();
115 m_pNode->m_Lock.unlock();
// Allocates a new tree node, forwarding the key into the node's storage
123 template <typename K>
124 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
126 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees a node WITHOUT invoking the value disposer (value must be detached first)
129 static void free_node( node_type * pNode )
131 // Free node without disposer
132 cxx_allocator().Delete( pNode );
// rcu_disposer: collects nodes unlinked during an update and retires them to RCU
// on destruction; the assert below shows RCU must NOT be locked at that point.
138 node_type * m_pRetiredList; ///< head of retired list
142 : m_pRetiredList( nullptr )
// Detaches the value from pNode and pushes the node onto the retired list
150 void dispose( node_type * pNode )
152 mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
154 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
158 pNode->m_pNextRemoved = m_pRetiredList;
159 m_pRetiredList = pNode;
// Functor handed to gc::retire_ptr to reclaim node memory after the grace period
163 struct internal_disposer
165 void operator()( node_type * p ) const
// Retire every collected node; each node's value was already disposed in dispose()
173 assert( !gc::is_locked() );
175 for ( node_type * p = m_pRetiredList; p; ) {
176 node_type * pNext = p->m_pNextRemoved;
177 // Value already disposed
178 gc::template retire_ptr<internal_disposer>( p );
// m_Root is a sentinel header node; the actual tree root is its right child
188 typename node_type::base_class m_Root;
190 item_counter m_ItemCounter;
195 /// Creates empty map
197 : m_pRoot( static_cast<node_type *>(&m_Root) )
// insert(): inserts "key -> pVal" only if the key is not yet present.
208 The \p key_type should be constructible from a value of type \p K.
210 RCU \p synchronize() can be called. RCU should not be locked.
212 Returns \p true if inserting successful, \p false otherwise.
214 template <typename K>
215 bool insert( K const& key, mapped_type * pVal )
// do_update() with allow_insert only: the functor is invoked on the freshly
// allocated (still value-less) node and installs pVal.
// NOTE(review): the lambda's "return pVal;" line is missing from this excerpt.
217 return do_update(key, key_comparator(),
218 [pVal]( node_type * pNode ) -> mapped_type*
220 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
223 update_flags::allow_insert
224 ) == update_flags::result_insert;
227 /// Updates the value for \p key
229 The operation performs inserting or updating the value for \p key with lock-free manner.
230 If \p bInsert is \p false, only updating of existing node is possible.
232 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
233 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
234 constructible from type \p K.
235 Otherwise, the value is changed to \p pVal.
237 RCU \p synchronize() method can be called. RCU should not be locked.
239 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
240 \p second is \p true if new node has been added or \p false if the node with \p key
241 already is in the tree.
243 template <typename K, typename Func>
244 std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
246 int result = do_update( key, key_comparator(),
247 [pVal]( node_type * ) -> mapped_type*
251 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
253 return std::make_pair( result != 0, (result & update_flags::result_insert) != 0 );
// NOTE(review): the bodies of the erase/extract member functions are not
// present in this excerpt; only declarations and documentation are visible.
256 /// Delete \p key from the map
258 RCU \p synchronize() method can be called. RCU should not be locked.
260 Return \p true if \p key is found and deleted, \p false otherwise
262 template <typename K>
263 bool erase( K const& key )
268 /// Deletes the item from the map using \p pred predicate for searching
270 The function is an analog of \p erase(K const&)
271 but \p pred is used for key comparing.
272 \p Less functor has the interface like \p std::less.
273 \p Less must imply the same element order as the comparator used for building the map.
275 template <typename K, typename Less>
276 bool erase_with( K const& key, Less pred )
282 /// Delete \p key from the map
284 The function searches an item with key \p key, calls \p f functor
285 and deletes the item. If \p key is not found, the functor is not called.
287 The functor \p Func interface:
290 void operator()(mapped_type& item) { ... }
294 RCU \p synchronize method can be called. RCU should not be locked.
296 Return \p true if key is found and deleted, \p false otherwise
298 template <typename K, typename Func>
299 bool erase( K const& key, Func f )
304 /// Deletes the item from the map using \p pred predicate for searching
306 The function is an analog of \p erase(K const&, Func)
307 but \p pred is used for key comparing.
308 \p Less functor has the interface like \p std::less.
309 \p Less must imply the same element order as the comparator used for building the map.
311 template <typename K, typename Less, typename Func>
312 bool erase_with( K const& key, Less pred, Func f )
318 /// Extracts an item with minimal key from the map
320 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
321 If the set is empty, returns empty \p exempt_ptr.
323 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
324 It means that the function gets leftmost leaf of the tree and tries to unlink it.
325 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
326 So, the function returns the item with minimum key at the moment of tree traversing.
328 RCU \p synchronize method can be called. RCU should NOT be locked.
329 The function does not free the item.
330 The deallocator will be implicitly invoked when the returned object is destroyed or when
331 its \p release() member function is called.
333 exempt_ptr extract_min()
338 /// Extracts an item with maximal key from the map
340 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
341 If the set is empty, returns empty \p exempt_ptr.
343 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
344 It means that the function gets rightmost leaf of the tree and tries to unlink it.
345 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
346 So, the function returns the item with maximum key at the moment of tree traversing.
348 RCU \p synchronize method can be called. RCU should NOT be locked.
349 The function does not free the item.
350 The deallocator will be implicitly invoked when the returned object is destroyed or when
351 its \p release() is called.
352 @note Before reusing \p result object you should call its \p release() method.
354 exempt_ptr extract_max()
359 /// Extracts an item from the map
361 The function searches an item with key equal to \p key in the tree,
362 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found.
363 If \p key is not found the function returns an empty \p exempt_ptr.
365 RCU \p synchronize method can be called. RCU should NOT be locked.
366 The function does not destroy the item found.
367 The deallocator will be implicitly invoked when the returned object is destroyed or when
368 its \p release() member function is called.
370 template <typename Q>
371 exempt_ptr extract( Q const& key )
376 /// Extracts an item from the map using \p pred for searching
378 The function is an analog of \p extract(exempt_ptr&, Q const&)
379 but \p pred is used for key compare.
380 \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less
381 "predicate requirements".
382 \p pred must imply the same element order as the comparator used for building the map.
384 template <typename Q, typename Less>
385 exempt_ptr extract_with( Q const& val, Less pred )
391 /// Find the key \p key
393 The function searches the item with key equal to \p key and calls the functor \p f for item found.
394 The interface of \p Func functor is:
397 void operator()( mapped_type& item );
400 where \p item is the item found.
401 The functor is called under node-level lock.
403 The function applies RCU lock internally.
405 The function returns \p true if \p key is found, \p false otherwise.
407 template <typename K, typename Func>
408 bool find( K const& key, Func f )
// The functor runs under the node's lock so the value pointer stays stable.
// NOTE(review): the lambda's tail (the f(*pVal) call and its return value)
// is missing from this excerpt.
410 return do_find( key, key_comparator(), [&f]( node_type * pNode ) -> bool {
411 node_scoped_lock l( pNode );
412 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
421 /// Finds the key \p val using \p pred predicate for searching
423 The function is an analog of \p find(K const&, Func)
424 but \p pred is used for key comparing.
425 \p Less functor has the interface like \p std::less.
426 \p Less must imply the same element order as the comparator used for building the map.
428 template <typename K, typename Less, typename Func>
429 bool find_with( K const& key, Less pred, Func f )
// Same as find(K, Func) but the comparator is synthesized from Less.
430 return do_find( key, opt::details::make_comparator_from_less<Less>(), [&f]( node_type * pNode ) -> bool {
433 node_scoped_lock l( pNode );
434 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
443 /// Find the key \p key
445 The function searches the item with key equal to \p key
446 and returns \p true if it is found, and \p false otherwise.
448 The function applies RCU lock internally.
450 template <typename K>
451 bool find( K const& key )
453 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
456 /// Finds the key \p val using \p pred predicate for searching
458 The function is an analog of \p find(K const&)
459 but \p pred is used for key comparing.
460 \p Less functor has the interface like \p std::less.
461 \p Less must imply the same element order as the comparator used for building the map.
463 template <typename K, typename Less>
464 bool find_with( K const& key, Less pred )
467 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
470 /// Finds \p key and return the item found
472 The function searches the item with key equal to \p key and returns the pointer to item found.
473 If \p key is not found it returns \p nullptr.
475 RCU should be locked before call the function.
476 Returned pointer is valid while RCU is locked.
478 template <typename Q>
479 mapped_type * get( Q const& key ) const
484 /// Finds \p key with \p pred predicate and return the item found
486 The function is an analog of \p get(Q const&)
487 but \p pred is used for comparing the keys.
489 \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
490 and \p Q in any order.
491 \p pred must imply the same element order as the comparator used for building the map.
493 template <typename Q, typename Less>
494 mapped_type * get_with( Q const& key, Less pred ) const
500 /// Clears the tree (thread safe, not atomic)
502 The function unlinks all items from the tree.
503 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
507 assert( set.empty() );
509 the assertion could be raised.
511 For each node the \ref disposer will be called after unlinking.
513 RCU \p synchronize method can be called. RCU should not be locked.
// Repeatedly extract the leftmost item until the tree is drained
517 for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
521 /// Clears the tree (not thread safe)
523 This function is not thread safe and may be called only when no other thread deals with the tree.
524 The function is used in the tree destructor.
531 /// Checks if the map is empty
// The tree lives under the sentinel's right child: empty iff that child is null
534 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
537 /// Returns item count in the map
539 Only leaf nodes containing user data are counted.
541 The value returned depends on item counter type provided by \p Traits template parameter.
542 If it is \p atomicity::empty_item_counter this function always returns 0.
544 The function is not suitable for checking the tree emptiness, use \p empty()
545 member function for this purpose.
549 return m_ItemCounter;
552 /// Returns const reference to internal statistics
553 stat const& statistics() const
558 /// Checks internal consistency (not atomic, not thread-safe)
560 The debugging function to check internal consistency of the tree.
562 bool check_consistency() const
// Internal: runs try_find() under an RCU read-side critical section starting
// at the sentinel root (direction 1 = right child, version 0).
// NOTE(review): the declaration of "result" and the RCU lock scope are
// missing from this excerpt.
569 template <typename Q, typename Compare, typename Func>
570 bool do_find( Q& key, Compare cmp, Func f ) const
575 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
// try_find() retries internally; it never returns "retry" to the caller
577 assert( result != find_result::retry );
578 return result == find_result::found;
581 template <typename K, typename Compare, typename Func>
582 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
584 check_deadlock_policy::check();
586 rcu_disposer removed_list;
589 return try_udate_root( key, cmp, nFlags, funcUpdate, removed_list );
// Internal: optimistic, version-validated descent (Bronson et al. algorithm).
// Re-reads the parent's version after each child read; a mismatch means the
// subtree rotated underneath us and the step must be retried one level up.
// NOTE(review): several structural lines (loop header, branch braces, the
// functor invocation) are missing from this excerpt.
597 template <typename Q, typename Compare, typename Func>
598 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
600 assert( gc::is_locked() );
604 node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
// No child: re-validate pNode's version before trusting the null link
606 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
607 m_stat.onFindRetry();
608 return find_result::retry;
611 m_stat.onFindFailed();
612 return find_result::not_found;
615 int nCmp = cmp( key, pChild->m_key );
// Exact key match: the node counts only if it still holds a value
617 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
620 m_stat.onFindSuccess();
621 return find_result::found;
625 m_stat.onFindFailed();
626 return find_result::not_found;
// Descend: if the child is shrinking (being rotated), wait it out and retry
629 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
630 if ( nChildVersion & node_type::shrinking ) {
631 m_stat.onFindWaitShrinking();
632 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
634 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
635 m_stat.onFindRetry();
636 return find_result::retry;
639 else if ( nChildVersion != node_type::unlinked ) {
// Validate the parent link we traversed before recursing into the child
641 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
642 m_stat.onFindRetry();
643 return find_result::retry;
646 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
647 if ( found != find_result::retry )
653 template <typename K, typename Compare, typename Func>
654 int try_update_root( K const* key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
656 assert( gc::is_locked() );
660 node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
662 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
663 if ( nChildVersion & node_type::shrinking ) {
664 m_stat.onUpdateRootWaitShrinking();
665 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
668 else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
669 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
674 if ( nFlags & update_flags::allow_insert ) {
675 // insert into tree as right child of the root
677 node_scoped_lock l( m_pRoot );
679 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
680 mapped_type * pVal = funcUpdate( pNew );
681 assert( pVal != nullptr );
682 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
684 m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
685 m_pRoot->height( 2, memory_model::memory_order_relaxed );
689 m_stat.onInsertSuccess();
690 return update_flags::result_insert;
693 return update_flags::failed;
695 } while ( result != update_flags::retry );
// Internal: recursive update step over (pParent -> pNode), validated by
// pNode's version snapshot nVersion. nCmp == 0 updates in place; otherwise
// descends toward the matching child, inserting when the slot is empty.
// NOTE(review): loop header, branch braces and the nCmp == 0 guard line are
// missing from this excerpt.
699 template <typename K, typename Compare, typename Func>
700 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
702 assert( gc::is_locked() );
703 assert( nVersion != node_type::unlinked );
705 int nCmp = cmp( key, pNode->m_key );
// Key matches this node: update only if the caller allows it
707 if ( nFlags & update_flags::allow_update ) {
708 return try_update_node( funcUpdate, pNode );
710 return update_flags::failed;
// Descend toward the child in the comparison direction
715 node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
716 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
717 return update_flags::retry;
719 if ( pChild == nullptr ) {
// Empty slot: insert here if permitted, otherwise report failure
721 if ( nFlags & update_flags::allow_insert )
722 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
724 result = update_flags::failed;
728 result = update_flags::retry;
729 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
730 if ( nChildVersion & node_type::shrinking ) {
731 m_stat.onUpdateWaitShrinking();
732 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
735 else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
736 // this second read is important, because it is protected by nChildVersion
738 // validate the read that our caller took to get to node
739 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
740 m_stat.onUpdateRetry();
741 return update_flags::retry;
744 // At this point we know that the traversal our parent took to get to node is still valid.
745 // The recursive implementation will validate the traversal from node to
746 // child, so just prior to the node nVersion validation both traversals were definitely okay.
747 // This means that we are no longer vulnerable to node shrinks, and we don't need
748 // to validate node version any more.
749 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
752 } while ( result == update_flags::retry );
756 template <typename K, typename Func>
757 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
761 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
762 mapped_type * pVal = funcUpdate( pNew );
763 assert( pVal != nullptr );
764 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
767 if ( c_bRelaxedInsert ) {
768 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
769 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
771 m_stat.onInsertRetry();
772 return update_flags::retry;
775 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
778 node_type * pDamaged;
780 node_scoped_lock l( pNode );
782 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
783 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
785 if ( c_RelaxedInsert ) {
787 m_stat.onRelaxedInsertFailed();
790 m_stat.onInsertRetry();
791 return update_flags::retry;
794 if ( !c_bRelaxedInsert )
795 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
797 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
798 pDamaged = fix_height_locked( pNode );
800 m_stat.onInsertSuccess();
802 fix_height_and_rebalance( pDamaged, disp );
804 return update_flags::result_insert;
807 template <typename Func>
808 int try_update_node( Func funcUpdate, node_type * pNode )
812 node_scoped_lock l( pNode );
814 if ( pNode->version( memory_model::memory_order_relaxed ) == node_type::unlinked ) {
815 if ( c_RelaxedInsert )
817 m_stat.onUpdateUnlinked();
818 return update_flags::retry;
821 pOld = pNode->value( memory_model::memory_order_relaxed );
822 mapped_type * pVal = funcUpdate( pNode );
823 assert( pVal != nullptr );
824 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed ));
829 m_stat.onDisposeValue();
832 m_stat.onUpdateSuccess();
833 return update_flags::result_update;
836 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
838 // pParent and pNode must be locked
839 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
841 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
842 node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
843 if ( pNode != pParentLeft && pNode != pParentRight ) {
844 // node is no longer a child of parent
848 assert( !pNode->is_unlinked());
849 assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
851 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
852 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
853 if ( pLeft != nullptr && pRight != nullptr ) {
854 // splicing is no longer possible
857 node_type * pSplice = pLeft ? pLeft : pRight;
859 if ( pParentLeft == pNode )
860 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
862 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
865 pSplice->pParent.store( pParent, memory_model::memory_order_release );
867 // Mark the node as unlinked
868 pNode->version( node_type::unlinked, memory_model::memory_order_release );
869 disp.dispose( pNode );
875 private: // rotations
877 int estimate_node_condition( node_type * pNode )
879 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
880 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
882 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
883 return unlink_required;
885 int h = pNode->height( memory_model::memory_order_relaxed );
886 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
887 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
889 int hNew = 1 + std::max( hL, hR );
890 int nBalance = hL - hR;
892 if ( nBalance < -1 || nBalance > 1 )
893 return rebalance_required;
895 return h != hNew ? hNew : nothing_required;
898 node_type * fix_height( node_type * pNode )
900 node_scoped_lock l( pNode );
901 return fix_height_locked( pNode );
// Repairs pNode's cached height while the caller holds its lock.
// Returns pNode itself when structural work (rebalance/unlink) is needed,
// nullptr when nothing is required, or the parent when only the height was
// stale (so damage may have propagated upward).
// NOTE(review): the switch header and the per-case return statements are
// missing from this excerpt.
904 node_type * fix_height_locked( node_type * pNode )
906 // pNode must be locked!!!
907 int h = estimate_node_condition( pNode );
909 case rebalance_required:
910 case unlink_required:
912 case nothing_required:
// Default case: h is the corrected height; store it and report the parent
915 pNode->height( h, memory_model::memory_order_relaxed );
916 return pNode->m_pParent.load( memory_model::memory_order_relaxed );
920 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
922 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
923 int nCond = estimate_node_condition( pNode );
924 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
927 if ( nCond != unlink_required && nCond != rebalance_required )
928 pNode = fix_height( pNode );
930 node_type * pParent = pNode->m_pParent.load( memory_model::memry_order_relaxed );
931 assert( pParent != nullptr );
933 node_scoped_lock lp( pParent );
934 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
935 && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
937 node_scoped_lock ln( pNode );
938 pNode = rebalance_locked( pParent, pNode, disp );
945 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
947 // pParent and pNode shlould be locked.
948 // Returns a damaged node, or nullptr if no more rebalancing is necessary
949 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
950 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
951 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
953 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
954 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
956 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
957 if ( try_unlink_locked( pParent, pNode, disp ))
958 return fix_height_locked( pParent );
960 // retry needed for pNode
965 int h = pNode->height( memory_model::memory_order_relaxed );
966 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
967 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
968 int hNew = 1 + std::max( hL, hR );
969 int balance = hL - hR;
972 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
973 else if ( balance < -1 )
974 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
975 else if ( hNew != h ) {
976 pNode->height( hNew, memory_model::memory_order_relaxed );
978 // pParent is already locked
979 return fix_height_locked( pParent );
985 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
987 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
988 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
989 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
991 // pParent and pNode is locked yet
992 // pNode->pLeft is too large, we will rotate-right.
993 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
996 node_scoped_lock l( pLeft );
997 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
998 return pNode; // retry for pNode
1000 int hL = pLeft->height( memory_model::memory_order_relaxed );
1002 return pNode; // retry
1004 node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
1005 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1006 node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1007 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1011 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1014 assert( pLRight != nullptr );
1016 node_scoped_lock lr( pLRight );
1017 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1018 return pNode; // retry
1020 hLR = pLRight->height( memory_model::memory_order_relaxed );
1022 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1024 node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1025 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1026 int balance = hLL - hLRL;
1027 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
1028 // nParent.child.left won't be damaged after a double rotation
1029 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1033 // focus on pLeft, if necessary pNode will be balanced later
1034 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1039 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1041 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1042 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1043 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1045 // pParent and pNode is locked yet
1047 node_scoped_lock l( pRight );
1048 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1049 return pNode; // retry for pNode
1051 int hR = pRight->height( memory_model::memory_order_relaxed );
1052 if ( hL - hR >= -1 )
1053 return pNode; // retry
1055 node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
1056 int hRL = pRLeft > pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1057 node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
1058 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1060 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1063 assert( pRLeft != nullptr );
1064 node_scoped_lock lrl( pRLeft );
1065 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1066 return pNode; // retry
1068 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1070 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1072 node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1073 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1074 int balance = hRR - hRLR;
1075 if ( b >= -1 && b <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_odel::memory_order_relaxed ) == nullptr)
1076 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1078 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1082 static void begin_change( node_type * pNode, version_type version )
1084 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1086 static void end_change( node_type * pNode, version_type version )
1088 // Clear shrinking and unlinked flags and increment version
1089 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1092 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1094 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1095 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1097 begin_change( pNode, nodeVersion );
1099 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1100 if ( pLRight != nullptr )
1101 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1103 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1104 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1106 if ( pParentLeft == pNode )
1107 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1109 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1110 pParent->m_pRight.store( pLeft, memory_mdel::memory_order_relaxed );
1112 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1114 // fix up heights links
1115 int hNode = 1 + std::max( hLR, hR );
1116 pNode->height( hNode, memory_model::memory_order_relaxed );
1117 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1119 end_change( pNode, nodeVersion );
1121 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1122 // parent.child). pNode is the deepest. Perform as many fixes as we can
1123 // with the locks we've got.
1125 // We've already fixed the height for pNode, but it might still be outside
1126 // our allowable balance range. In that case a simple fix_height_locked()
1128 int nodeBalance = hLR - hR;
1129 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1130 // we need another rotation at pNode
1134 // we've fixed balance and height damage for pNode, now handle
1135 // extra-routing node damage
1136 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
1137 // we need to remove pNode and then repair
1141 // we've already fixed the height at pLeft, do we need a rotation here?
1142 int leftBalance = hLL - hNode;
1143 if ( leftBalance < -1 || leftBalance > 1 )
1146 // pLeft might also have routing node damage (if pLeft.left was null)
1147 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1150 // try to fix the parent height while we've still got the lock
1151 return fix_height_locked( pParent );
1154 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1156 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1157 node_type * pParentLeft = pParent->m_pLeft.load( memory_odel::memory_order_relaxed );
1159 begin_change( pNode, nodeVersion );
1161 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1162 pNode->m_pRight.store( pRLeft, memory_nodel::memory_order_relaxed );
1163 if ( pRLeft != nullptr )
1164 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1166 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1167 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1169 if ( pParentLeft == pNode )
1170 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1172 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1173 pParent->m_pRight.store( pRight, memory_model::memory_model_relaxed );
1175 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1178 int hNode = 1 + std::max( hL, hRL );
1179 pNode->height( hNode, memory_model::memory_order_relaxed );
1180 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1182 end_change( pNode, nodeVersion );
1184 int nodeBalance = hRL - hL;
1185 if ( nodeBalance < -1 || nodeBalance > 1 )
1188 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1191 int rightBalance = hRR - hNode;
1192 if ( rightBalance < -1 || rightBalance > 1 )
1195 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
1198 return fix_height_locked( pParent );
1201 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1203 typename node_type::value_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1204 typename node_type::value_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1206 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1207 node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1208 node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
1209 int hLRR = pLRR ? pLRR->hight( memory_model::memory_order_relaxed ) : 0;
1211 begin_change( pNode, nodeVersion );
1212 begin_change( pLeft, leftVersion );
1214 // fix up pNode links, careful about the order!
1215 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1216 if ( pLRR != nullptr )
1217 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1219 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1220 if ( pLRL != nullptr )
1221 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1223 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1224 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1225 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1226 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1229 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1231 assert( Parent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1232 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1234 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1237 int hNode = 1 + std::max( hLRR, hR );
1238 pNode->height( hNode, memory_model::memory_order_relaxed );
1239 int hLeft = 1 + std::max( hLL, hLRL );
1240 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1241 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1243 end_change( pNode, nodeVersion );
1244 end_change( pLeft, leftVersion );
1246 // caller should have performed only a single rotation if pLeft was going
1247 // to end up damaged
1248 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1249 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1251 // We have damaged pParent, pLR (now parent.child), and pNode (now
1252 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1253 // can with the locks we've got.
1255 // We've already fixed the height for pNode, but it might still be outside
1256 // our allowable balance range. In that case a simple fix_height_locked()
1258 int nodeBalance = hLRR - hR;
1259 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1260 // we need another rotation at pNode
1264 // pNode might also be damaged by being an unnecessary routing node
1265 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1266 // repair involves splicing out pNode and maybe more rotations
1270 // we've already fixed the height at pLRight, do we need a rotation here?
1271 final int balanceLR = hLeft - hNode;
1272 if ( balanceLR < -1 || balanceLR > 1 )
1275 // try to fix the parent height while we've still got the lock
1276 return fix_height_locked( pParent );
1279 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1281 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1282 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1284 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1285 node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1286 node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1287 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1289 begin_change( pNode, nodeVersion );
1290 begin_change( pRight, ghtVersion );
1292 // fix up pNode links, careful about the order!
1293 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1294 if ( pRLL != nullptr )
1295 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1297 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1298 if ( pRLR != nullptr )
1299 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1301 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1302 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1303 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1304 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1307 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1309 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1310 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1312 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1315 int hNode = 1 + std::max( hL, hRLL );
1316 pNode->height( hNode, memory_model::memory_order_relaxed );
1317 int hRight = 1 + std::max( hRLR, hRR );
1318 pRight->height( hRight, memory_model::memory_order_relaxed );
1319 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1321 end_change( pNode, nodeVersion );
1322 end_change( pRight, rightVersion );
1324 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1326 int nodeBalance = hRLL - hL;
1327 if ( nodeBalance < -1 || nodeBalance > 1 )
1329 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1332 int balRL = hRight - hNode;
1333 if ( balRL < -1 || balRL > 1 )
1336 return fix_height_locked( pParent );
1341 }} // namespace cds::container
1343 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H