3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <cds/container/details/bronson_avltree_base.h>
7 #include <cds/urcu/details/check_deadlock.h>
8 #include <cds/details/binary_functor_wrapper.h>
10 namespace cds { namespace container {
12 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
13 /** @ingroup cds_nonintrusive_map
14 @ingroup cds_nonintrusive_tree
15 @headerfile cds/container/bronson_avltree_map_rcu.h
17 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
18 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
20 the disposer functor provided by \p Traits template parameter.
22 <b>Template arguments</b>:
23 - \p RCU - one of \ref cds_urcu_gc "RCU type"
25 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
27 - \p Traits - tree traits, default is \p bronson_avltree::traits
28 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
29 instead of \p Traits template argument.
31 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
32 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
38 # ifdef CDS_DOXYGEN_INVOKED
39 typename Traits = bronson_avltree::traits
44 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
47 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
48 typedef Key key_type; ///< type of a key stored in the map
49 typedef T * mapped_type; ///< type of value stored in the map
50 typedef Traits traits; ///< Traits template parameter
52 # ifdef CDS_DOXYGEN_INVOKED
53 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
55 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
57 typedef typename traits::item_counter item_counter; ///< Item counting policy
58 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
59 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
60 typedef typename traits::stat stat; ///< internal statistics
61 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
62 typedef typename traits::back_off back_off; ///< Back-off strategy
63 typedef typename traits::disposer disposer; ///< Value disposer
64 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
66 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
67 static bool const c_bRelaxedInsert = traits::relaxed_insert;
71 typedef typename sync_monitor::template node_injection< bronson_avltree::node< key_type, mapped_type >> node_type;
72 typedef typename node_type::version_type version_type;
74 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
75 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
77 enum class find_result
84 enum class update_flags
93 result_inserted = allow_insert,
94 result_updated = allow_update,
95 result_removed = allow_remove
100 nothing_required = -3,
101 rebalance_required = -2,
105 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
template <typename K>
// Allocates and constructs a tree node through the configured node allocator.
// \p key is perfectly forwarded into the node; \p nHeight and \p version seed
// the AVL bookkeeping fields; \p pParent/\p pLeft/\p pRight wire the node in.
static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Deallocates a node WITHOUT invoking the value disposer — intended for nodes
// whose value has never been published (e.g. a failed relaxed insertion).
static void free_node( node_type * pNode )
// Free node without disposer
cxx_allocator().Delete( pNode );
125 node_type * m_pRetiredList; ///< head of retired list
129 : m_pRetiredList( nullptr )
// Queues an unlinked node for deferred (RCU grace-period) reclamation:
// detaches the value pointer, then pushes the node onto the retired list.
// NOTE(review): the use of pVal after extraction lies outside this fragment —
// presumably it is handed to the value disposer; confirm against full source.
void dispose( node_type * pNode )
mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
// clear the node's value pointer so the node itself is value-free when retired
pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// intrusive singly-linked retired list, flushed via gc::retire_ptr later
pNode->m_pNextRemoved = m_pRetiredList;
m_pRetiredList = pNode;
150 struct internal_disposer
152 void operator()( node_type * p ) const
160 assert( !gc::is_locked() );
162 for ( node_type * p = m_pRetiredList; p; ) {
163 node_type * pNext = p->m_pNextRemoved;
164 // Value already disposed
165 gc::template retire_ptr<internal_disposer>( p );
175 typename node_type::base_class m_Root;
177 item_counter m_ItemCounter;
178 mutable sync_monitor m_Monitor;
183 /// Creates empty map
185 : m_pRoot( static_cast<node_type *>(&m_Root) )
196 The \p key_type should be constructible from a value of type \p K.
198 RCU \p synchronize() can be called. RCU should not be locked.
200 Returns \p true if inserting successful, \p false otherwise.
template <typename K>
bool insert( K const& key, mapped_type * pVal )
// Delegates to do_update() restricted to allow_insert: the functor publishes
// \p pVal into the freshly created node; insertion fails if the key exists.
return do_update(key, key_comparator(),
[pVal]( node_type * pNode ) -> mapped_type*
// a brand-new node must not carry a value yet
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
update_flags::allow_insert
) == update_flags::result_inserted;
215 /// Updates the value for \p key
217 The operation performs inserting or updating the value for \p key with lock-free manner.
218 If \p bInsert is \p false, only updating of existing node is possible.
220 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
221 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
222 constructible from type \p K.
223 Otherwise, the value is changed to \p pVal.
225 RCU \p synchronize() method can be called. RCU should not be locked.
227 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
228 \p second is \p true if new node has been added or \p false if the node with \p key
229 already is in the tree.
231 template <typename K, typename Func>
232 std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
234 int result = do_update( key, key_comparator(),
235 [pVal]( node_type * ) -> mapped_type*
239 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
241 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
244 /// Delete \p key from the map
246 RCU \p synchronize() method can be called. RCU should not be locked.
248 Return \p true if \p key is found and deleted, \p false otherwise
template <typename K>
bool erase( K const& key )
// allow_remove with a no-op value functor: the removed value is handed to the
// disposer by try_remove_node() itself, nothing extra to do here.
return do_update( key, key_comparator(), []( mapped_type& ) {}, update_flags::allow_remove ) == update_flags::result_removed;
256 /// Deletes the item from the map using \p pred predicate for searching
258 The function is an analog of \p erase(K const&)
259 but \p pred is used for key comparing.
260 \p Less functor has the interface like \p std::less.
261 \p Less must imply the same element order as the comparator used for building the map.
template <typename K, typename Less>
bool erase_with( K const& key, Less pred )
// predicate_wrapper adapts the Less predicate into the 3-way comparator
// interface expected by do_update()
cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
[]( mapped_type& ) {},
update_flags::allow_remove
) == update_flags::result_removed;
275 /// Delete \p key from the map
277 The function searches an item with key \p key, calls \p f functor
278 and deletes the item. If \p key is not found, the functor is not called.
280 The functor \p Func interface:
283 void operator()(mapped_type& item) { ... }
287 RCU \p synchronize method can be called. RCU should not be locked.
289 Return \p true if key is found and deleted, \p false otherwise
template <typename K, typename Func>
bool erase( K const& key, Func f )
// \p f is applied to the value just before it is handed to the disposer
[&f]( mapped_type& val ) { f( val ); },
update_flags::allow_remove
) == update_flags::result_removed;
302 /// Deletes the item from the map using \p pred predicate for searching
304 The function is an analog of \p erase(K const&, Func)
305 but \p pred is used for key comparing.
306 \p Less functor has the interface like \p std::less.
307 \p Less must imply the same element order as the comparator used for building the map.
template <typename K, typename Less, typename Func>
bool erase_with( K const& key, Less pred, Func f )
// Less is adapted to a 3-way comparator; \p f sees the value before disposal
cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
[&f]( mapped_type& val ) { f( val ); },
update_flags::allow_remove
) == update_flags::result_removed;
321 /// Extracts an item with minimal key from the map
323 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
324 If the set is empty, returns empty \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
327 It means that the function gets leftmost leaf of the tree and tries to unlink it.
328 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
329 So, the function returns the item with minimum key at the moment of tree traversing.
331 RCU \p synchronize method can be called. RCU should NOT be locked.
332 The function does not free the item.
333 The deallocator will be implicitly invoked when the returned object is destroyed or when
334 its \p release() member function is called.
336 exempt_ptr extract_min()
341 /// Extracts an item with maximal key from the map
343 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
344 If the set is empty, returns empty \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
347 It means that the function gets rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
349 So, the function returns the item with maximum key at the moment of tree traversing.
351 RCU \p synchronize method can be called. RCU should NOT be locked.
352 The function does not free the item.
353 The deallocator will be implicitly invoked when the returned object is destroyed or when
354 its \p release() is called.
355 @note Before reusing \p result object you should call its \p release() method.
357 exempt_ptr extract_max()
362 /// Extracts an item from the map
364 The function searches an item with key equal to \p key in the tree,
365 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found.
366 If \p key is not found the function returns an empty \p exempt_ptr.
368 RCU \p synchronize method can be called. RCU should NOT be locked.
369 The function does not destroy the item found.
The deallocator will be implicitly invoked when the returned object is destroyed or when
371 its \p release() member function is called.
373 template <typename Q>
374 exempt_ptr extract( Q const& key )
379 /// Extracts an item from the map using \p pred for searching
381 The function is an analog of \p extract(exempt_ptr&, Q const&)
382 but \p pred is used for key compare.
383 \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less
384 "predicate requirements".
385 \p pred must imply the same element order as the comparator used for building the map.
387 template <typename Q, typename Less>
388 exempt_ptr extract_with( Q const& val, Less pred )
394 /// Find the key \p key
396 The function searches the item with key equal to \p key and calls the functor \p f for item found.
397 The interface of \p Func functor is:
400 void operator()( mapped_type& item );
403 where \p item is the item found.
404 The functor is called under node-level lock.
406 The function applies RCU lock internally.
408 The function returns \p true if \p key is found, \p false otherwise.
template <typename K, typename Func>
bool find( K const& key, Func f )
// The functor runs under the node-level lock so the value cannot be disposed
// concurrently. NOTE(review): the tail of this lambda (null check of pVal and
// the call of f) lies outside this fragment — confirm against full source.
return do_find( key, key_comparator(), [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
assert( pNode != nullptr );
node_scoped_lock l( monitor, *pNode );
mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
425 /// Finds the key \p val using \p pred predicate for searching
427 The function is an analog of \p find(K const&, Func)
428 but \p pred is used for key comparing.
429 \p Less functor has the interface like \p std::less.
430 \p Less must imply the same element order as the comparator used for building the map.
template <typename K, typename Less, typename Func>
bool find_with( K const& key, Less pred, Func f )
// Less is converted to a 3-way comparator; otherwise identical to find(K,Func).
// NOTE(review): the tail of this lambda lies outside this fragment.
return do_find( key, opt::details::make_comparator_from_less<Less>(),
[&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
assert( pNode != nullptr );
node_scoped_lock l( monitor, *pNode );
mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
450 /// Find the key \p key
452 The function searches the item with key equal to \p key
453 and returns \p true if it is found, and \p false otherwise.
455 The function applies RCU lock internally.
template <typename K>
bool find( K const& key )
// presence check only: the node functor does nothing but report success
return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
463 /// Finds the key \p val using \p pred predicate for searching
465 The function is an analog of \p find(K const&)
466 but \p pred is used for key comparing.
467 \p Less functor has the interface like \p std::less.
468 \p Less must imply the same element order as the comparator used for building the map.
template <typename K, typename Less>
bool find_with( K const& key, Less pred )
// presence check with a user-supplied ordering predicate
return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
477 /// Finds \p key and return the item found
479 The function searches the item with key equal to \p key and returns the pointer to item found.
480 If \p key is not found it returns \p nullptr.
482 RCU should be locked before call the function.
483 Returned pointer is valid while RCU is locked.
485 template <typename Q>
486 mapped_type * get( Q const& key ) const
491 /// Finds \p key with \p pred predicate and return the item found
493 The function is an analog of \p get(Q const&)
494 but \p pred is used for comparing the keys.
496 \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
497 and \p Q in any order.
498 \p pred must imply the same element order as the comparator used for building the map.
500 template <typename Q, typename Less>
501 mapped_type * get_with( Q const& key, Less pred ) const
507 /// Clears the tree (thread safe, not atomic)
509 The function unlink all items from the tree.
510 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
514 assert( set.empty() );
516 the assertion could be raised.
518 For each node the \ref disposer will be called after unlinking.
520 RCU \p synchronize method can be called. RCU should not be locked.
524 for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
528 /// Clears the tree (not thread safe)
530 This function is not thread safe and may be called only when no other thread deals with the tree.
531 The function is used in the tree destructor.
538 /// Checks if the map is empty
541 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
544 /// Returns item count in the map
546 Only leaf nodes containing user data are counted.
548 The value returned depends on item counter type provided by \p Traits template parameter.
549 If it is \p atomicity::empty_item_counter this function always returns 0.
551 The function is not suitable for checking the tree emptiness, use \p empty()
552 member function for this purpose.
556 return m_ItemCounter;
559 /// Returns const reference to internal statistics
560 stat const& statistics() const
565 /// Checks internal consistency (not atomic, not thread-safe)
567 The debugging function to check internal consistency of the tree.
569 bool check_consistency() const
template <typename Q, typename Compare, typename Func>
// Common search driver: runs try_find() from the sentinel root under RCU
// protection. \p cmp is a 3-way comparator, \p f is applied to the found node.
// Returns \p true iff the key was found.
// NOTE(review): the RCU lock scope and the retry loop around try_find() are
// not visible in this fragment; the assert below implies retries are resolved
// before control reaches it.
bool do_find( Q& key, Compare cmp, Func f ) const
result = try_find( key, cmp, f, m_pRoot, 1, 0 );
assert( result != find_result::retry );
return result == find_result::found;
588 template <typename K, typename Compare, typename Func>
589 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
591 check_deadlock_policy::check();
593 rcu_disposer removed_list;
596 return try_udate_root( key, cmp, nFlags, funcUpdate, removed_list );
604 template <typename Q, typename Compare, typename Func>
605 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
607 assert( gc::is_locked() );
611 node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
613 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
614 m_stat.onFindRetry();
615 return find_result::retry;
618 m_stat.onFindFailed();
619 return find_result::not_found;
622 int nCmp = cmp( key, pChild->m_key );
624 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
626 node_scoped_lock l( m_Monitor, pChild );
627 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
628 if ( f( *pChild ) ) {
629 m_stat.onFindSuccess();
630 return find_result::found;
635 m_stat.onFindFailed();
636 return find_result::not_found;
639 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
640 if ( nChildVersion & node_type::shrinking ) {
641 m_stat.onFindWaitShrinking();
642 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
644 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
645 m_stat.onFindRetry();
646 return find_result::retry;
649 else if ( nChildVersion != node_type::unlinked ) {
651 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
652 m_stat.onFindRetry();
653 return find_result::retry;
656 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
657 if ( found != find_result::retry )
663 template <typename K, typename Compare, typename Func>
664 int try_update_root( K const* key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
666 assert( gc::is_locked() );
670 // get right child of root
671 node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
673 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
674 if ( nChildVersion & node_type::shrinking ) {
675 m_stat.onUpdateRootWaitShrinking();
676 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
677 result = update_flags::retry;
679 else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
680 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
685 if ( nFlags & update_flags::allow_insert ) {
686 // insert into tree as right child of the root
688 node_scoped_lock l( m_Monitor, *m_pRoot );
690 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
691 mapped_type * pVal = funcUpdate( pNew );
692 assert( pVal != nullptr );
693 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
695 m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
696 m_pRoot->height( 2, memory_model::memory_order_relaxed );
700 m_stat.onInsertSuccess();
701 return update_flags::result_inserted;
704 return update_flags::failed;
706 } while ( result == update_flags::retry );
template <typename K, typename Compare, typename Func>
// Recursive step of the optimistic update protocol. \p pNode was reached from
// \p pParent while pNode had version \p nVersion; that observation is
// re-validated before any decision becomes visible.
int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
int nCmp = cmp( key, pNode->m_key );
// key matches this node: perform the update/removal right here
if ( nFlags & update_flags::allow_update ) {
return try_update_node( funcUpdate, pNode );
if ( nFlags & update_flags::allow_remove ) {
return try_remove_node( pParent, pNode, nVersion, funcUpdate, disp );
return update_flags::failed;
// descend toward nCmp; re-check pNode's version after reading the link
node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return update_flags::retry;
if ( pChild == nullptr ) {
// no child in the search direction: insert here if permitted
if ( nFlags & update_flags::allow_insert )
result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
result = update_flags::failed;
result = update_flags::retry;
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
// the child is being rotated: wait, then retry the link read
m_stat.onUpdateWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
m_stat.onUpdateRetry();
return update_flags::retry;
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// child, so just prior to the node nVersion validation both traversals were definitely okay.
// This means that we are no longer vulnerable to node shrinks, and we don't need
// to validate node version any more.
result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
} while ( result == update_flags::retry );
770 template <typename K, typename Func>
771 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
775 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
776 mapped_type * pVal = funcUpdate( pNew );
777 assert( pVal != nullptr );
778 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
781 if ( c_bRelaxedInsert ) {
782 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
783 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
785 m_stat.onInsertRetry();
786 return update_flags::retry;
789 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
792 node_type * pDamaged;
794 assert( pNode != nullptr );
795 node_scoped_lock l( m_Monitor, *pNode );
797 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
798 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
800 if ( c_RelaxedInsert ) {
802 m_stat.onRelaxedInsertFailed();
805 m_stat.onInsertRetry();
806 return update_flags::retry;
809 if ( !c_bRelaxedInsert )
810 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
812 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
813 pDamaged = fix_height_locked( pNode );
815 m_stat.onInsertSuccess();
817 fix_height_and_rebalance( pDamaged, disp );
819 return update_flags::result_inserted;
822 template <typename Func>
823 int try_update_node( Func funcUpdate, node_type * pNode )
827 assert( pNode != nullptr );
828 node_scoped_lock l( m_Monitor, *pNode );
830 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
831 m_stat.onUpdateUnlinked();
832 return update_flags::retry;
835 pOld = pNode->value( memory_model::memory_order_relaxed );
836 mapped_type * pVal = funcUpdate( *pNode );
837 assert( pVal != nullptr );
838 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed ));
843 m_stat.onDisposeValue();
846 m_stat.onUpdateSuccess();
847 return update_flags::result_updated;
850 template <typename Func>
851 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
853 assert( pParent != nullptr );
854 assert( pNode != nullptr );
856 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
857 return update_flags::failed;
859 int result = update_flags::retry;
861 if ( pNode->child( -1, atomics::memory_order_relaxed ) == nullptr
862 || pNode->child( 1, atomics::memory_order_relaxed ) == nullptr )
864 node_type * pDamaged;
867 node_scoped_lock lp( m_Monitor, *pParent );
868 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || pNode->m_pParent.load( atomics::memory_order_relaxed ) != pParent )
869 return update_flags::retry;
872 node_scoped_lock ln( m_Monitor, *pNode );
873 pOld = pNode->value( atomics::memory_order_relaxed );
874 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion
876 && try_unlink_locked( pParent, pNode, disp ) )
878 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
881 return update_flags::retry;
883 pDamaged = fix_height_locked( pParent );
888 m_stat.onDisposeValue();
890 fix_height_and_rebalance( pDamaged, disp );
891 return upfate_flags::result_removed;
895 node_scoped_lock ln( m_Monitor, *pNode );
896 mapped_type * pOld = pNode->value( atomics::memory_order_relaxed );
897 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
898 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
899 result = upfate_flags::result_removed;
903 if ( result == upfate_flags::result_removed ) {
906 m_stat.onDisposeValue();
909 return update_flags::retry;
912 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
914 // pParent and pNode must be locked
915 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
917 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
918 node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
919 if ( pNode != pParentLeft && pNode != pParentRight ) {
920 // node is no longer a child of parent
924 assert( !pNode->is_unlinked());
925 assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
927 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
928 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
929 if ( pLeft != nullptr && pRight != nullptr ) {
930 // splicing is no longer possible
933 node_type * pSplice = pLeft ? pLeft : pRight;
935 if ( pParentLeft == pNode )
936 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
938 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
941 pSplice->pParent.store( pParent, memory_model::memory_order_release );
943 // Mark the node as unlinked
944 pNode->version( node_type::unlinked, memory_model::memory_order_release );
945 disp.dispose( pNode );
951 private: // rotations
953 int estimate_node_condition( node_type * pNode )
955 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
956 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
958 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
959 return unlink_required;
961 int h = pNode->height( memory_model::memory_order_relaxed );
962 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
963 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
965 int hNew = 1 + std::max( hL, hR );
966 int nBalance = hL - hR;
968 if ( nBalance < -1 || nBalance > 1 )
969 return rebalance_required;
971 return h != hNew ? hNew : nothing_required;
// Locks \p pNode and recomputes its height via fix_height_locked().
// Returns the next node that may need repair (see fix_height_locked()).
node_type * fix_height( node_type * pNode )
assert( pNode != nullptr );
node_scoped_lock l( m_Monitor, *pNode );
return fix_height_locked( pNode );
981 node_type * fix_height_locked( node_type * pNode )
983 // pNode must be locked!!!
984 int h = estimate_node_condition( pNode );
986 case rebalance_required:
987 case unlink_required:
989 case nothing_required:
992 pNode->height( h, memory_model::memory_order_relaxed );
993 return pNode->m_pParent.load( memory_model::memory_order_relaxed );
997 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
999 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
1000 int nCond = estimate_node_condition( pNode );
1001 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1004 if ( nCond != unlink_required && nCond != rebalance_required )
1005 pNode = fix_height( pNode );
1007 node_type * pParent = pNode->m_pParent.load( memory_model::memry_order_relaxed );
1008 assert( pParent != nullptr );
1010 node_scoped_lock lp( m_Monitor, *pParent );
1011 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1012 && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
1014 node_scoped_lock ln( m_Monitor, *pNode );
1015 pNode = rebalance_locked( pParent, pNode, disp );
1022 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1024 // pParent and pNode shlould be locked.
1025 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1026 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1027 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1028 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1030 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
1031 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
1033 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1034 if ( try_unlink_locked( pParent, pNode, disp ))
1035 return fix_height_locked( pParent );
1037 // retry needed for pNode
1042 int h = pNode->height( memory_model::memory_order_relaxed );
1043 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1044 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1045 int hNew = 1 + std::max( hL, hR );
1046 int balance = hL - hR;
1049 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1050 else if ( balance < -1 )
1051 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1052 else if ( hNew != h ) {
1053 pNode->height( hNew, memory_model::memory_order_relaxed );
1055 // pParent is already locked
1056 return fix_height_locked( pParent );
1062 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1064 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1065 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1066 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1068 // pParent and pNode is locked yet
1069 // pNode->pLeft is too large, we will rotate-right.
1070 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1073 assert( pLeft != nullptr );
1074 node_scoped_lock l( m_Monitor, *pLeft );
1075 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1076 return pNode; // retry for pNode
1078 int hL = pLeft->height( memory_model::memory_order_relaxed );
1080 return pNode; // retry
1082 node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
1083 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1084 node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1085 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1089 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1092 assert( pLRight != nullptr );
1094 node_scoped_lock lr( m_Monitor, *pLRight );
1095 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1096 return pNode; // retry
1098 hLR = pLRight->height( memory_model::memory_order_relaxed );
1100 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1102 node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1103 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1104 int balance = hLL - hLRL;
1105 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
1106 // nParent.child.left won't be damaged after a double rotation
1107 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1111 // focus on pLeft, if necessary pNode will be balanced later
1112 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1117 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1119 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1120 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1121 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1123 // pParent and pNode is locked yet
1125 assert( pRight != nullptr );
1126 node_scoped_lock l( m_Monitor, *pRight );
1127 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1128 return pNode; // retry for pNode
1130 int hR = pRight->height( memory_model::memory_order_relaxed );
1131 if ( hL - hR >= -1 )
1132 return pNode; // retry
1134 node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
1135 int hRL = pRLeft > pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1136 node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
1137 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1139 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1142 assert( pRLeft != nullptr );
1143 node_scoped_lock lrl( m_Monitor, *pRLeft );
1144 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1145 return pNode; // retry
1147 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1149 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1151 node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1152 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1153 int balance = hRR - hRLR;
1154 if ( b >= -1 && b <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_odel::memory_order_relaxed ) == nullptr)
1155 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1157 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1161 static void begin_change( node_type * pNode, version_type version )
1163 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1165 static void end_change( node_type * pNode, version_type version )
1167 // Clear shrinking and unlinked flags and increment version
1168 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1171 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1173 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1174 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1176 begin_change( pNode, nodeVersion );
1178 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1179 if ( pLRight != nullptr )
1180 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1182 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1183 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1185 if ( pParentLeft == pNode )
1186 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1188 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1189 pParent->m_pRight.store( pLeft, memory_mdel::memory_order_relaxed );
1191 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1193 // fix up heights links
1194 int hNode = 1 + std::max( hLR, hR );
1195 pNode->height( hNode, memory_model::memory_order_relaxed );
1196 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1198 end_change( pNode, nodeVersion );
1200 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1201 // parent.child). pNode is the deepest. Perform as many fixes as we can
1202 // with the locks we've got.
1204 // We've already fixed the height for pNode, but it might still be outside
1205 // our allowable balance range. In that case a simple fix_height_locked()
1207 int nodeBalance = hLR - hR;
1208 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1209 // we need another rotation at pNode
1213 // we've fixed balance and height damage for pNode, now handle
1214 // extra-routing node damage
1215 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
1216 // we need to remove pNode and then repair
1220 // we've already fixed the height at pLeft, do we need a rotation here?
1221 int leftBalance = hLL - hNode;
1222 if ( leftBalance < -1 || leftBalance > 1 )
1225 // pLeft might also have routing node damage (if pLeft.left was null)
1226 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1229 // try to fix the parent height while we've still got the lock
1230 return fix_height_locked( pParent );
1233 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1235 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1236 node_type * pParentLeft = pParent->m_pLeft.load( memory_odel::memory_order_relaxed );
1238 begin_change( pNode, nodeVersion );
1240 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1241 pNode->m_pRight.store( pRLeft, memory_nodel::memory_order_relaxed );
1242 if ( pRLeft != nullptr )
1243 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1245 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1246 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1248 if ( pParentLeft == pNode )
1249 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1251 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1252 pParent->m_pRight.store( pRight, memory_model::memory_model_relaxed );
1254 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1257 int hNode = 1 + std::max( hL, hRL );
1258 pNode->height( hNode, memory_model::memory_order_relaxed );
1259 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1261 end_change( pNode, nodeVersion );
1263 int nodeBalance = hRL - hL;
1264 if ( nodeBalance < -1 || nodeBalance > 1 )
1267 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1270 int rightBalance = hRR - hNode;
1271 if ( rightBalance < -1 || rightBalance > 1 )
1274 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
1277 return fix_height_locked( pParent );
1280 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1282 typename node_type::value_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1283 typename node_type::value_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1285 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1286 node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1287 node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
1288 int hLRR = pLRR ? pLRR->hight( memory_model::memory_order_relaxed ) : 0;
1290 begin_change( pNode, nodeVersion );
1291 begin_change( pLeft, leftVersion );
1293 // fix up pNode links, careful about the order!
1294 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1295 if ( pLRR != nullptr )
1296 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1298 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1299 if ( pLRL != nullptr )
1300 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1302 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1303 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1304 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1305 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1308 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1310 assert( Parent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1311 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1313 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1316 int hNode = 1 + std::max( hLRR, hR );
1317 pNode->height( hNode, memory_model::memory_order_relaxed );
1318 int hLeft = 1 + std::max( hLL, hLRL );
1319 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1320 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1322 end_change( pNode, nodeVersion );
1323 end_change( pLeft, leftVersion );
1325 // caller should have performed only a single rotation if pLeft was going
1326 // to end up damaged
1327 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1328 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1330 // We have damaged pParent, pLR (now parent.child), and pNode (now
1331 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1332 // can with the locks we've got.
1334 // We've already fixed the height for pNode, but it might still be outside
1335 // our allowable balance range. In that case a simple fix_height_locked()
1337 int nodeBalance = hLRR - hR;
1338 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1339 // we need another rotation at pNode
1343 // pNode might also be damaged by being an unnecessary routing node
1344 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1345 // repair involves splicing out pNode and maybe more rotations
1349 // we've already fixed the height at pLRight, do we need a rotation here?
1350 final int balanceLR = hLeft - hNode;
1351 if ( balanceLR < -1 || balanceLR > 1 )
1354 // try to fix the parent height while we've still got the lock
1355 return fix_height_locked( pParent );
1358 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1360 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1361 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1363 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1364 node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1365 node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1366 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1368 begin_change( pNode, nodeVersion );
1369 begin_change( pRight, ghtVersion );
1371 // fix up pNode links, careful about the order!
1372 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1373 if ( pRLL != nullptr )
1374 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1376 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1377 if ( pRLR != nullptr )
1378 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1380 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1381 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1382 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1383 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1386 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1388 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1389 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1391 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1394 int hNode = 1 + std::max( hL, hRLL );
1395 pNode->height( hNode, memory_model::memory_order_relaxed );
1396 int hRight = 1 + std::max( hRLR, hRR );
1397 pRight->height( hRight, memory_model::memory_order_relaxed );
1398 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1400 end_change( pNode, nodeVersion );
1401 end_change( pRight, rightVersion );
1403 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1405 int nodeBalance = hRLL - hL;
1406 if ( nodeBalance < -1 || nodeBalance > 1 )
1408 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1411 int balRL = hRight - hNode;
1412 if ( balRL < -1 || balRL > 1 )
1415 return fix_height_locked( pParent );
1420 }} // namespace cds::container
1422 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H