3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <memory> // unique_ptr
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/details/binary_functor_wrapper.h>
12 namespace cds { namespace container {
14 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
15 /** @ingroup cds_nonintrusive_map
16 @ingroup cds_nonintrusive_tree
17 @headerfile cds/container/bronson_avltree_map_rcu.h
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Returned pointer to \p mapped_type of extracted node
72 typedef std::unique_ptr< mapped_type, disposer > unique_ptr;
76 typedef typename sync_monitor::template node_injection< bronson_avltree::node< key_type, mapped_type >> node_type;
77 typedef typename node_type::version_type version_type;
79 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
80 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// Internal tri-state result of a find operation.
82 enum class find_result
// Flags controlling do_update(): what the operation may do (insert /
// update / remove) plus its outcome codes.
89 enum class update_flags
// Outcome codes alias the corresponding permission bits so that
// (result & allow_xxx) tests work uniformly at the call sites.
98 result_inserted = allow_insert,
99 result_updated = allow_update,
100 result_removed = allow_remove
// Node-condition codes returned by estimate_node_condition():
// negative sentinels; any non-negative value is the node's new height.
105 nothing_required = -3,
106 rebalance_required = -2,
110 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
115 template <typename K>
116 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
118 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
121 static void free_node( node_type * pNode )
123 // Free node without disposer
124 cxx_allocator().Delete( pNode );
// Collects nodes unlinked inside the RCU read-side critical section and
// retires them to the RCU garbage collector after the lock is released.
130 node_type * m_pRetiredList; ///< head of retired list
134 : m_pRetiredList( nullptr )
// Disposes the node's value (if any) and pushes the node onto the
// intrusive retired list; the node structure itself is freed later via RCU.
142 void dispose( node_type * pNode )
144 mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
146 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
150 pNode->m_pNextRemoved = m_pRetiredList;
151 m_pRetiredList = pNode;
// Functor handed to the RCU GC: frees the node structure only
// (its value was already disposed in dispose() above).
155 struct internal_disposer
157 void operator()( node_type * p ) const
// Retire loop: must run outside the RCU read-side critical section.
165 assert( !gc::is_locked() );
167 for ( node_type * p = m_pRetiredList; p; ) {
168 node_type * pNext = p->m_pNextRemoved;
169 // Value already disposed
170 gc::template retire_ptr<internal_disposer>( p );
180 typename node_type::base_class m_Root;
182 item_counter m_ItemCounter;
183 mutable sync_monitor m_Monitor;
188 /// Creates empty map
190 : m_pRoot( static_cast<node_type *>(&m_Root) )
201 The \p key_type should be constructible from a value of type \p K.
203 RCU \p synchronize() can be called. RCU should not be locked.
205 Returns \p true if inserting successful, \p false otherwise.
207 template <typename K>
208 bool insert( K const& key, mapped_type * pVal )
210 return do_update(key, key_comparator(),
211 [pVal]( node_type * pNode ) -> mapped_type*
213 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
216 update_flags::allow_insert
217 ) == update_flags::result_inserted;
220 /// Updates the value for \p key
222 The operation performs inserting or updating the value for \p key with lock-free manner.
223 If \p bInsert is \p false, only updating of existing node is possible.
225 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
226 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
227 constructible from type \p K.
228 Otherwise, the value is changed to \p pVal.
230 RCU \p synchronize() method can be called. RCU should not be locked.
232 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
233 \p second is \p true if new node has been added or \p false if the node with \p key
234 already is in the tree.
236 template <typename K, typename Func>
237 std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
239 int result = do_update( key, key_comparator(),
240 [pVal]( node_type * ) -> mapped_type*
244 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
246 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
249 /// Delete \p key from the map
251 RCU \p synchronize() method can be called. RCU should not be locked.
253 Return \p true if \p key is found and deleted, \p false otherwise
255 template <typename K>
256 bool erase( K const& key )
261 []( mapped_type * pVal ) { disposer()(pVal); },
262 update_flags::allow_remove
263 ) == update_flags::result_removed;
266 /// Deletes the item from the map using \p pred predicate for searching
268 The function is an analog of \p erase(K const&)
269 but \p pred is used for key comparing.
270 \p Less functor has the interface like \p std::less.
271 \p Less must imply the same element order as the comparator used for building the map.
273 template <typename K, typename Less>
274 bool erase_with( K const& key, Less pred )
279 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
280 []( mapped_type * pVal ) { disposer()(pVal); },
281 update_flags::allow_remove
282 ) == update_flags::result_removed;
285 /// Delete \p key from the map
287 The function searches an item with key \p key, calls \p f functor
288 and deletes the item. If \p key is not found, the functor is not called.
290 The functor \p Func interface:
293 void operator()(mapped_type& item) { ... }
297 RCU \p synchronize method can be called. RCU should not be locked.
299 Return \p true if key is found and deleted, \p false otherwise
301 template <typename K, typename Func>
302 bool erase( K const& key, Func f )
307 [&f]( mapped_type * pVal ) { f( *pVal ); disposer()(pVal); },
308 update_flags::allow_remove
309 ) == update_flags::result_removed;
312 /// Deletes the item from the map using \p pred predicate for searching
314 The function is an analog of \p erase(K const&, Func)
315 but \p pred is used for key comparing.
316 \p Less functor has the interface like \p std::less.
317 \p Less must imply the same element order as the comparator used for building the map.
319 template <typename K, typename Less, typename Func>
320 bool erase_with( K const& key, Less pred, Func f )
325 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
326 [&f]( mapped_type * pVal ) { f( *pVal ); disposer()(pVal); },
327 update_flags::allow_remove
328 ) == update_flags::result_removed;
331 /// Extracts an item with minimal key from the map
333 Returns \p std::unique_ptr to the leftmost item.
334 If the set is empty, returns empty \p std::unique_ptr.
336 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
337 It means that the function gets leftmost leaf of the tree and tries to unlink it.
338 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
339 So, the function returns the item with minimum key at the moment of tree traversing.
341 RCU \p synchronize method can be called. RCU should NOT be locked.
342 The function does not free the item.
343 The deallocator will be implicitly invoked when the returned object is destroyed or when
344 its \p reset(nullptr) member function is called.
346 unique_ptr extract_min()
351 /// Extracts an item with maximal key from the map
353 Returns \p std::unique_ptr pointer to the rightmost item.
354 If the set is empty, returns empty \p std::unique_ptr.
356 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
357 It means that the function gets rightmost leaf of the tree and tries to unlink it.
358 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
359 So, the function returns the item with maximum key at the moment of tree traversing.
361 RCU \p synchronize method can be called. RCU should NOT be locked.
362 The function does not free the item.
363 The deallocator will be implicitly invoked when the returned object is destroyed or when
364 its \p reset(nullptr) is called.
365 @note Before reusing \p result object you should call its \p release() method.
367 unique_ptr extract_max()
372 /// Extracts an item from the map
374 The function searches an item with key equal to \p key in the tree,
375 unlinks it, and returns \p std::unique_ptr pointer to a value found.
376 If \p key is not found the function returns an empty \p std::unique_ptr.
378 RCU \p synchronize method can be called. RCU should NOT be locked.
379 The function does not destroy the value found.
380 The disposer will be implicitly invoked when the returned object is destroyed or when
381 its \p reset(nullptr) member function is called.
383 template <typename Q>
384 unique_ptr extract( Q const& key )
386 unique_ptr pExtracted;
391 [&pExtracted]( mapped_type * pVal ) { pExtracted.reset( pVal ); },
392 update_flags::allow_remove
397 /// Extracts an item from the map using \p pred for searching
399 The function is an analog of \p extract(Q const&)
400 but \p pred is used for key compare.
401 \p Less has the interface like \p std::less.
402 \p pred must imply the same element order as the comparator used for building the tree.
404 template <typename Q, typename Less>
405 unique_ptr extract_with( Q const& key, Less pred )
408 unique_ptr pExtracted;
412 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
413 [&pExtracted]( mapped_type * pVal ) { pExtracted.reset( pVal ); },
414 update_flags::allow_remove
419 /// Find the key \p key
421 The function searches the item with key equal to \p key and calls the functor \p f for item found.
422 The interface of \p Func functor is:
425 void operator()( mapped_type& item );
428 where \p item is the item found.
429 The functor is called under node-level lock.
431 The function applies RCU lock internally.
433 The function returns \p true if \p key is found, \p false otherwise.
// Finds \p key and calls f(*value) under the node-level lock; applies the
// RCU lock internally. Returns true iff the key was found.
// NOTE(review): this functor takes (sync_monitor&, node_type*), but
// try_find() invokes its functor as f( *pChild ) and the bool-only
// find()/find_with() overloads pass single-argument lambdas — the call
// shapes are mutually inconsistent; confirm the intended signature upstream.
435 template <typename K, typename Func>
436 bool find( K const& key, Func f )
438 return do_find( key, key_comparator(), [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
439 assert( pNode != nullptr );
// lock the node so its value pointer cannot be disposed while f runs
440 node_scoped_lock l( monitor, *pNode );
441 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
450 /// Finds the key \p val using \p pred predicate for searching
452 The function is an analog of \p find(K const&, Func)
453 but \p pred is used for key comparing.
454 \p Less functor has the interface like \p std::less.
455 \p Less must imply the same element order as the comparator used for building the map.
// Analog of find(K const&, Func) with \p Less used for key comparison.
// \p Less must imply the same element order as the map's comparator.
// NOTE(review): same functor-arity inconsistency as find(K, Func) — see
// the note there; verify against try_find()'s invocation of f.
457 template <typename K, typename Less, typename Func>
458 bool find_with( K const& key, Less pred, Func f )
461 return do_find( key, opt::details::make_comparator_from_less<Less>(),
462 [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
463 assert( pNode != nullptr );
// lock the node so its value pointer cannot be disposed while f runs
464 node_scoped_lock l( monitor, *pNode );
465 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
475 /// Find the key \p key
477 The function searches the item with key equal to \p key
478 and returns \p true if it is found, and \p false otherwise.
480 The function applies RCU lock internally.
// Returns true iff \p key is present; applies the RCU lock internally.
// NOTE(review): the single-argument lambda here does not match the
// (sync_monitor&, node_type*) functor used by find(K, Func) — one of the
// two shapes must be wrong; confirm before relying on either.
482 template <typename K>
483 bool find( K const& key )
485 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
488 /// Finds the key \p val using \p pred predicate for searching
490 The function is an analog of \p find(K const&)
491 but \p pred is used for key comparing.
492 \p Less functor has the interface like \p std::less.
493 \p Less must imply the same element order as the comparator used for building the map.
// Presence check with \p Less used for key comparison; \p Less must imply
// the same element order as the map's comparator.
// NOTE(review): single-argument lambda — same inconsistency as find(K);
// verify the expected functor signature of do_find/try_find.
495 template <typename K, typename Less>
496 bool find_with( K const& key, Less pred )
499 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
502 /// Clears the tree (thread safe, not atomic)
504 The function unlink all items from the tree.
505 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
509 assert( set.empty() );
511 the assertion could be raised.
513 For each node the \ref disposer will be called after unlinking.
515 RCU \p synchronize method can be called. RCU should not be locked.
520 unique_ptr ep( extract_min() );
526 /// Clears the tree (not thread safe)
528 This function is not thread safe and may be called only when no other thread deals with the tree.
529 The function is used in the tree destructor.
536 /// Checks if the map is empty
539 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
542 /// Returns item count in the map
544 Only leaf nodes containing user data are counted.
546 The value returned depends on item counter type provided by \p Traits template parameter.
547 If it is \p atomicity::empty_item_counter this function always returns 0.
549 The function is not suitable for checking the tree emptiness, use \p empty()
550 member function for this purpose.
554 return m_ItemCounter;
557 /// Returns const reference to internal statistics
558 stat const& statistics() const
563 /// Checks internal consistency (not atomic, not thread-safe)
565 The debugging function to check internal consistency of the tree.
567 bool check_consistency() const
575 template <typename Q, typename Compare, typename Func>
576 bool do_find( Q& key, Compare cmp, Func f ) const
581 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
583 assert( result != find_result::retry );
584 return result == find_result::found;
587 template <typename K, typename Compare, typename Func>
588 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
590 check_deadlock_policy::check();
592 rcu_disposer removed_list;
595 return try_udate_root( key, cmp, nFlags, funcUpdate, removed_list );
// Recursive worker of find: descends from pNode in direction nDir using
// Bronson-style optimistic, version-validated traversal. Returns
// found / not_found, or retry when pNode's version changed underneath us.
603 template <typename Q, typename Compare, typename Func>
604 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
606 assert( gc::is_locked() );
610 node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
// no child in the search direction: key absent iff pNode's version still holds
612 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
613 m_stat.onFindRetry();
614 return find_result::retry;
617 m_stat.onFindFailed();
618 return find_result::not_found;
621 int nCmp = cmp( key, pChild->m_key );
// exact match: re-check is_valued under the node lock before invoking f
623 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
// NOTE(review): node_scoped_lock elsewhere takes *node, not a pointer —
// and locking here while find()'s functor also locks would self-deadlock;
// confirm which side is supposed to take the lock.
625 node_scoped_lock l( m_Monitor, pChild );
626 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
// NOTE(review): f is invoked as f(*pChild) here, but the functors built in
// find(K, Func) take (sync_monitor&, node_type*) — the shapes disagree.
627 if ( f( *pChild ) ) {
628 m_stat.onFindSuccess();
629 return find_result::found;
634 m_stat.onFindFailed();
635 return find_result::not_found;
// child is shrinking: wait for the shrink to finish, then revalidate pNode
638 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
639 if ( nChildVersion & node_type::shrinking ) {
640 m_stat.onFindWaitShrinking();
641 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
643 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
644 m_stat.onFindRetry();
645 return find_result::retry;
648 else if ( nChildVersion != node_type::unlinked ) {
650 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
651 m_stat.onFindRetry();
652 return find_result::retry;
// descend; a retry from the child level restarts the loop at this level
655 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
656 if ( found != find_result::retry )
662 template <typename K, typename Compare, typename Func>
663 int try_update_root( K const* key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
665 assert( gc::is_locked() );
669 // get right child of root
670 node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
672 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
673 if ( nChildVersion & node_type::shrinking ) {
674 m_stat.onUpdateRootWaitShrinking();
675 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
676 result = update_flags::retry;
678 else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
679 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
684 if ( nFlags & update_flags::allow_insert ) {
685 // insert into tree as right child of the root
687 node_scoped_lock l( m_Monitor, *m_pRoot );
689 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
690 mapped_type * pVal = funcUpdate( pNew );
691 assert( pVal != nullptr );
692 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
694 m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
695 m_pRoot->height( 2, memory_model::memory_order_relaxed );
699 m_stat.onInsertSuccess();
700 return update_flags::result_inserted;
703 return update_flags::failed;
705 } while ( result == update_flags::retry );
// Recursive worker for insert/update/remove at pNode, whose version was
// read by the caller as nVersion; pParent is pNode's presumed parent.
709 template <typename K, typename Compare, typename Func>
710 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
712 assert( gc::is_locked() );
713 assert( nVersion != node_type::unlinked );
715 int nCmp = cmp( key, pNode->m_key );
// exact key match: update or remove this node depending on the flags
717 if ( nFlags & update_flags::allow_update ) {
718 return try_update_node( funcUpdate, pNode );
720 if ( nFlags & update_flags::allow_remove ) {
721 return try_remove_node( pParent, pNode, nVersion, funcUpdate, disp );
723 return update_flags::failed;
// descend toward the key; revalidate pNode's version after the child read
728 node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
729 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
730 return update_flags::retry;
732 if ( pChild == nullptr ) {
// no child in the search direction: insert here if allowed
734 if ( nFlags & update_flags::allow_insert )
735 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
737 result = update_flags::failed;
741 result = update_flags::retry;
742 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
743 if ( nChildVersion & node_type::shrinking ) {
744 m_stat.onUpdateWaitShrinking();
745 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
748 else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
749 // this second read is important, because it is protected by nChildVersion
751 // validate the read that our caller took to get to node
752 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
753 m_stat.onUpdateRetry();
754 return update_flags::retry;
757 // At this point we know that the traversal our parent took to get to node is still valid.
758 // The recursive implementation will validate the traversal from node to
759 // child, so just prior to the node nVersion validation both traversals were definitely okay.
760 // This means that we are no longer vulnerable to node shrinks, and we don't need
761 // to validate node version any more.
762 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
765 } while ( result == update_flags::retry );
769 template <typename K, typename Func>
770 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
774 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
775 mapped_type * pVal = funcUpdate( pNew );
776 assert( pVal != nullptr );
777 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
780 if ( c_bRelaxedInsert ) {
781 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
782 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
784 m_stat.onInsertRetry();
785 return update_flags::retry;
788 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
791 node_type * pDamaged;
793 assert( pNode != nullptr );
794 node_scoped_lock l( m_Monitor, *pNode );
796 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
797 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
799 if ( c_RelaxedInsert ) {
801 m_stat.onRelaxedInsertFailed();
804 m_stat.onInsertRetry();
805 return update_flags::retry;
808 if ( !c_bRelaxedInsert )
809 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
811 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
812 pDamaged = fix_height_locked( pNode );
814 m_stat.onInsertSuccess();
816 fix_height_and_rebalance( pDamaged, disp );
818 return update_flags::result_inserted;
821 template <typename Func>
822 int try_update_node( Func funcUpdate, node_type * pNode )
826 assert( pNode != nullptr );
827 node_scoped_lock l( m_Monitor, *pNode );
829 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
830 m_stat.onUpdateUnlinked();
831 return update_flags::retry;
834 pOld = pNode->value( memory_model::memory_order_relaxed );
835 mapped_type * pVal = funcUpdate( *pNode );
836 assert( pVal != nullptr );
837 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed ));
842 m_stat.onDisposeValue();
845 m_stat.onUpdateSuccess();
846 return update_flags::result_updated;
849 template <typename Func>
850 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
852 assert( pParent != nullptr );
853 assert( pNode != nullptr );
855 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
856 return update_flags::failed;
858 if ( pNode->child( -1, atomics::memory_order_relaxed ) == nullptr
859 || pNode->child( 1, atomics::memory_order_relaxed ) == nullptr )
861 node_type * pDamaged;
864 node_scoped_lock lp( m_Monitor, *pParent );
865 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || pNode->m_pParent.load( atomics::memory_order_relaxed ) != pParent )
866 return update_flags::retry;
869 node_scoped_lock ln( m_Monitor, *pNode );
870 pOld = pNode->value( atomics::memory_order_relaxed );
871 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion
873 && try_unlink_locked( pParent, pNode, disp ) )
875 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
878 return update_flags::retry;
880 pDamaged = fix_height_locked( pParent );
883 func( pOld ); // calls pOld disposer inside
884 m_stat.onDisposeValue();
886 fix_height_and_rebalance( pDamaged, disp );
887 return upfate_flags::result_removed;
890 int result = update_flags::retry;
892 node_scoped_lock ln( m_Monitor, *pNode );
893 mapped_type * pOld = pNode->value( atomics::memory_order_relaxed );
894 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
895 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
896 result = upfate_flags::result_removed;
900 if ( result == upfate_flags::result_removed ) {
901 func( *pOld ); // calls pOld disposer inside
902 m_stat.onDisposeValue();
909 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
911 // pParent and pNode must be locked
912 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
914 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
915 node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
916 if ( pNode != pParentLeft && pNode != pParentRight ) {
917 // node is no longer a child of parent
921 assert( !pNode->is_unlinked());
922 assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
924 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
925 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
926 if ( pLeft != nullptr && pRight != nullptr ) {
927 // splicing is no longer possible
930 node_type * pSplice = pLeft ? pLeft : pRight;
932 if ( pParentLeft == pNode )
933 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
935 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
938 pSplice->pParent.store( pParent, memory_model::memory_order_release );
940 // Mark the node as unlinked
941 pNode->version( node_type::unlinked, memory_model::memory_order_release );
942 disp.dispose( pNode );
949 private: // rotations
951 int estimate_node_condition( node_type * pNode )
953 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
954 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
956 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
957 return unlink_required;
959 int h = pNode->height( memory_model::memory_order_relaxed );
960 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
961 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
963 int hNew = 1 + std::max( hL, hR );
964 int nBalance = hL - hR;
966 if ( nBalance < -1 || nBalance > 1 )
967 return rebalance_required;
969 return h != hNew ? hNew : nothing_required;
972 node_type * fix_height( node_type * pNode )
974 assert( pNode != nullptr );
975 node_scoped_lock l( m_Monitor, *pNode );
976 return fix_height_locked( pNode );
979 node_type * fix_height_locked( node_type * pNode )
981 // pNode must be locked!!!
982 int h = estimate_node_condition( pNode );
984 case rebalance_required:
985 case unlink_required:
987 case nothing_required:
990 pNode->height( h, memory_model::memory_order_relaxed );
991 return pNode->m_pParent.load( memory_model::memory_order_relaxed );
995 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
997 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
998 int nCond = estimate_node_condition( pNode );
999 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1002 if ( nCond != unlink_required && nCond != rebalance_required )
1003 pNode = fix_height( pNode );
1005 node_type * pParent = pNode->m_pParent.load( memory_model::memry_order_relaxed );
1006 assert( pParent != nullptr );
1008 node_scoped_lock lp( m_Monitor, *pParent );
1009 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1010 && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
1012 node_scoped_lock ln( m_Monitor, *pNode );
1013 pNode = rebalance_locked( pParent, pNode, disp );
1020 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1022 // pParent and pNode should be locked.
1023 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1024 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1025 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1026 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1028 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
1029 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
1031 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1032 if ( try_unlink_locked( pParent, pNode, disp ))
1033 return fix_height_locked( pParent );
1035 // retry needed for pNode
1040 int h = pNode->height( memory_model::memory_order_relaxed );
1041 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1042 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1043 int hNew = 1 + std::max( hL, hR );
1044 int balance = hL - hR;
1047 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1048 else if ( balance < -1 )
1049 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1050 else if ( hNew != h ) {
1051 pNode->height( hNew, memory_model::memory_order_relaxed );
1053 // pParent is already locked
1054 return fix_height_locked( pParent );
1060 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1062 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1063 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1064 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1066 // pParent and pNode is locked yet
1067 // pNode->pLeft is too large, we will rotate-right.
1068 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1071 assert( pLeft != nullptr );
1072 node_scoped_lock l( m_Monitor, *pLeft );
1073 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1074 return pNode; // retry for pNode
1076 int hL = pLeft->height( memory_model::memory_order_relaxed );
1078 return pNode; // retry
1080 node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
1081 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1082 node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1083 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1087 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1090 assert( pLRight != nullptr );
1092 node_scoped_lock lr( m_Monitor, *pLRight );
1093 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1094 return pNode; // retry
1096 hLR = pLRight->height( memory_model::memory_order_relaxed );
1098 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1100 node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1101 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1102 int balance = hLL - hLRL;
1103 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
1104 // nParent.child.left won't be damaged after a double rotation
1105 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1109 // focus on pLeft, if necessary pNode will be balanced later
1110 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1115 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1117 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1118 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1119 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1121 // pParent and pNode is locked yet
1123 assert( pRight != nullptr );
1124 node_scoped_lock l( m_Monitor, *pRight );
1125 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1126 return pNode; // retry for pNode
1128 int hR = pRight->height( memory_model::memory_order_relaxed );
1129 if ( hL - hR >= -1 )
1130 return pNode; // retry
1132 node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
1133 int hRL = pRLeft > pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1134 node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
1135 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1137 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1140 assert( pRLeft != nullptr );
1141 node_scoped_lock lrl( m_Monitor, *pRLeft );
1142 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1143 return pNode; // retry
1145 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1147 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1149 node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1150 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1151 int balance = hRR - hRLR;
1152 if ( b >= -1 && b <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_odel::memory_order_relaxed ) == nullptr)
1153 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1155 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1159 static void begin_change( node_type * pNode, version_type version )
1161 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1163 static void end_change( node_type * pNode, version_type version )
1165 // Clear shrinking and unlinked flags and increment version
1166 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1169 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1171 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1172 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1174 begin_change( pNode, nodeVersion );
1176 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1177 if ( pLRight != nullptr )
1178 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1180 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1181 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1183 if ( pParentLeft == pNode )
1184 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1186 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1187 pParent->m_pRight.store( pLeft, memory_mdel::memory_order_relaxed );
1189 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1191 // fix up heights links
1192 int hNode = 1 + std::max( hLR, hR );
1193 pNode->height( hNode, memory_model::memory_order_relaxed );
1194 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1196 end_change( pNode, nodeVersion );
1198 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1199 // parent.child). pNode is the deepest. Perform as many fixes as we can
1200 // with the locks we've got.
1202 // We've already fixed the height for pNode, but it might still be outside
1203 // our allowable balance range. In that case a simple fix_height_locked()
1205 int nodeBalance = hLR - hR;
1206 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1207 // we need another rotation at pNode
1211 // we've fixed balance and height damage for pNode, now handle
1212 // extra-routing node damage
1213 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
1214 // we need to remove pNode and then repair
1218 // we've already fixed the height at pLeft, do we need a rotation here?
1219 int leftBalance = hLL - hNode;
1220 if ( leftBalance < -1 || leftBalance > 1 )
1223 // pLeft might also have routing node damage (if pLeft.left was null)
1224 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1227 // try to fix the parent height while we've still got the lock
1228 return fix_height_locked( pParent );
1231 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1233 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1234 node_type * pParentLeft = pParent->m_pLeft.load( memory_odel::memory_order_relaxed );
1236 begin_change( pNode, nodeVersion );
1238 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1239 pNode->m_pRight.store( pRLeft, memory_nodel::memory_order_relaxed );
1240 if ( pRLeft != nullptr )
1241 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1243 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1244 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1246 if ( pParentLeft == pNode )
1247 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1249 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1250 pParent->m_pRight.store( pRight, memory_model::memory_model_relaxed );
1252 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1255 int hNode = 1 + std::max( hL, hRL );
1256 pNode->height( hNode, memory_model::memory_order_relaxed );
1257 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1259 end_change( pNode, nodeVersion );
1261 int nodeBalance = hRL - hL;
1262 if ( nodeBalance < -1 || nodeBalance > 1 )
1265 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1268 int rightBalance = hRR - hNode;
1269 if ( rightBalance < -1 || rightBalance > 1 )
1272 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
1275 return fix_height_locked( pParent );
1278 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1280 typename node_type::value_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1281 typename node_type::value_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1283 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1284 node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1285 node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
1286 int hLRR = pLRR ? pLRR->hight( memory_model::memory_order_relaxed ) : 0;
1288 begin_change( pNode, nodeVersion );
1289 begin_change( pLeft, leftVersion );
1291 // fix up pNode links, careful about the order!
1292 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1293 if ( pLRR != nullptr )
1294 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1296 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1297 if ( pLRL != nullptr )
1298 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1300 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1301 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1302 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1303 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1306 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1308 assert( Parent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1309 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1311 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1314 int hNode = 1 + std::max( hLRR, hR );
1315 pNode->height( hNode, memory_model::memory_order_relaxed );
1316 int hLeft = 1 + std::max( hLL, hLRL );
1317 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1318 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1320 end_change( pNode, nodeVersion );
1321 end_change( pLeft, leftVersion );
1323 // caller should have performed only a single rotation if pLeft was going
1324 // to end up damaged
1325 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1326 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1328 // We have damaged pParent, pLR (now parent.child), and pNode (now
1329 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1330 // can with the locks we've got.
1332 // We've already fixed the height for pNode, but it might still be outside
1333 // our allowable balance range. In that case a simple fix_height_locked()
1335 int nodeBalance = hLRR - hR;
1336 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1337 // we need another rotation at pNode
1341 // pNode might also be damaged by being an unnecessary routing node
1342 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1343 // repair involves splicing out pNode and maybe more rotations
1347 // we've already fixed the height at pLRight, do we need a rotation here?
1348 final int balanceLR = hLeft - hNode;
1349 if ( balanceLR < -1 || balanceLR > 1 )
1352 // try to fix the parent height while we've still got the lock
1353 return fix_height_locked( pParent );
1356 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1358 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1359 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1361 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1362 node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1363 node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1364 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1366 begin_change( pNode, nodeVersion );
1367 begin_change( pRight, ghtVersion );
1369 // fix up pNode links, careful about the order!
1370 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1371 if ( pRLL != nullptr )
1372 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1374 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1375 if ( pRLR != nullptr )
1376 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1378 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1379 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1380 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1381 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1384 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1386 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1387 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1389 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1392 int hNode = 1 + std::max( hL, hRLL );
1393 pNode->height( hNode, memory_model::memory_order_relaxed );
1394 int hRight = 1 + std::max( hRLR, hRR );
1395 pRight->height( hRight, memory_model::memory_order_relaxed );
1396 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1398 end_change( pNode, nodeVersion );
1399 end_change( pRight, rightVersion );
1401 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1403 int nodeBalance = hRLL - hL;
1404 if ( nodeBalance < -1 || nodeBalance > 1 )
1406 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1409 int balRL = hRight - hNode;
1410 if ( balRL < -1 || balRL > 1 )
1413 return fix_height_locked( pParent );
1418 }} // namespace cds::container
1420 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H