3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
// Real (non-Doxygen) definition: exempt_ptr holds the pointee type of
// mapped_type, i.e. T with the pointer stripped via std::remove_pointer.
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
// Internal node layout is parameterized by the sync monitor so that
// node-level locks are injected directly into each node.
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// Outcome of an optimistic search (see try_find): found / not_found / retry,
// where retry means the traversal was invalidated and must be restarted.
96 enum class find_result
// update_flags: bit flags passed into do_update()/try_update() plus the
// outcome codes reported back (result_inserted / result_updated).
113 result_inserted = allow_insert,
114 result_updated = allow_update,
// Rebalance requirement codes used by the fix-up phase after updates.
121 nothing_required = -3,
122 rebalance_required = -2,
// Per-node lock guard acquired through the shared sync_monitor.
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
136 template <typename K>
// Allocates and constructs a tree node via cxx_allocator, forwarding the key.
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees a node that must already be value-less; the user disposer is NOT invoked here.
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 assert( pNode->m_SyncMonitorInjection.check_free());
147 cxx_allocator().Delete( pNode );
150 static void free_value( mapped_type pVal )
// Thin wrappers over node link accessors with an explicit memory order.
155 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
157 return pNode->child( nDir, order );
160 static node_type * parent( node_type * pNode, atomics::memory_order order )
162 return pNode->parent( order );
// rcu_disposer: accumulates nodes (and at most one value, see dispose_value's
// assert) unlinked during one operation, then retires them through RCU.
168 node_type * m_pRetiredList; ///< head of retired node list
169 mapped_type m_pRetiredValue; ///< value retired
173 : m_pRetiredList( nullptr )
174 , m_pRetiredValue( nullptr )
// Queues an unlinked, value-less node on the intrusive retired list.
182 void dispose( node_type * pNode )
184 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
185 pNode->m_pNextRemoved = m_pRetiredList;
186 m_pRetiredList = pNode;
// Records the single value pointer to be retired via the user disposer.
189 void dispose_value( mapped_type pVal )
191 assert( m_pRetiredValue == nullptr );
192 m_pRetiredValue = pVal;
196 struct internal_disposer
198 void operator()( node_type * p ) const
// Retirement must happen outside the RCU read-side critical section.
206 assert( !gc::is_locked() );
208 // TODO: use RCU::batch_retire
211 for ( node_type * p = m_pRetiredList; p; ) {
212 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
213 // Value already disposed
214 gc::template retire_ptr<internal_disposer>( p );
219 if ( m_pRetiredValue )
220 gc::template retire_ptr<disposer>( m_pRetiredValue );
// Data members. m_Root is a sentinel header node; the real tree hangs off
// its right child (see empty() and the "get right child of root" helpers).
228 typename node_type::base_class m_Root;
230 item_counter m_ItemCounter;
231 mutable sync_monitor m_Monitor;
236 /// Creates empty map
238 : m_pRoot( static_cast<node_type *>( &m_Root ))
249 The \p key_type should be constructible from a value of type \p K.
251 RCU \p synchronize() can be called. RCU should not be locked.
253 Returns \p true if inserting successful, \p false otherwise.
255 template <typename K>
256 bool insert( K const& key, mapped_type pVal )
258 return do_update(key, key_comparator(),
259 [pVal]( node_type * pNode ) -> mapped_type
// Insert-only path: an existing node must not already carry a value.
261 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
265 update_flags::allow_insert
266 ) == update_flags::result_inserted;
269 /// Updates the value for \p key
271 The operation performs inserting or updating the value for \p key with lock-free manner.
272 If \p bInsert is \p false, only updating of existing node is possible.
274 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
275 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
276 constructible from type \p K.
277 Otherwise, the value for \p key will be changed to \p pVal.
279 RCU \p synchronize() method can be called. RCU should not be locked.
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
282 \p second is \p true if new node has been added or \p false if the node with \p key
285 template <typename K>
286 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
288 int result = do_update( key, key_comparator(),
289 [pVal]( node_type * ) -> mapped_type
293 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
295 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
300 /// Delete \p key from the map
302 RCU \p synchronize() method can be called. RCU should not be locked.
304 Return \p true if \p key is found and deleted, \p false otherwise
306 template <typename K>
307 bool erase( K const& key )
// Functor returns true: the value is handed to the disposer (not extracted).
312 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
316 /// Deletes the item from the map using \p pred predicate for searching
318 The function is an analog of \p erase(K const&)
319 but \p pred is used for key comparing.
320 \p Less functor has the interface like \p std::less.
321 \p Less must imply the same element order as the comparator used for building the map.
323 template <typename K, typename Less>
324 bool erase_with( K const& key, Less pred )
329 cds::opt::details::make_comparator_from_less<Less>(),
330 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
334 /// Delete \p key from the map
336 The function searches an item with key \p key, calls \p f functor
337 and deletes the item. If \p key is not found, the functor is not called.
339 The functor \p Func interface:
342 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
346 RCU \p synchronize method can be called. RCU should not be locked.
348 Return \p true if key is found and deleted, \p false otherwise
350 template <typename K, typename Func>
351 bool erase( K const& key, Func f )
// Call user functor f on the (key, value) pair, then dispose the value.
356 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
359 disp.dispose_value(pVal);
365 /// Deletes the item from the map using \p pred predicate for searching
367 The function is an analog of \p erase(K const&, Func)
368 but \p pred is used for key comparing.
369 \p Less functor has the interface like \p std::less.
370 \p Less must imply the same element order as the comparator used for building the map.
372 template <typename K, typename Less, typename Func>
373 bool erase_with( K const& key, Less pred, Func f )
378 cds::opt::details::make_comparator_from_less<Less>(),
379 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
382 disp.dispose_value(pVal);
388 /// Extracts a value with minimal key from the map
390 Returns \p exempt_ptr to the leftmost item.
391 If the tree is empty, returns empty \p exempt_ptr.
393 Note that the function returns only the value for minimal key.
394 To retrieve its key use \p extract_min( Func ) member function.
396 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
397 It means that the function gets leftmost leaf of the tree and tries to unlink it.
398 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
399 So, the function returns the item with minimum key at the moment of tree traversing.
401 RCU \p synchronize method can be called. RCU should NOT be locked.
402 The function does not free the item.
403 The deallocator will be implicitly invoked when the returned object is destroyed or when
404 its \p release() member function is called.
406 exempt_ptr extract_min()
408 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
411 /// Extracts the minimal key and corresponding value
413 Returns \p exempt_ptr to the leftmost item.
414 If the tree is empty, returns empty \p exempt_ptr.
416 \p Func functor is used to store minimal key.
417 \p Func has the following signature:
420 void operator()( key_type const& key );
423 If the tree is empty, \p f is not called.
424 Otherwise, it is called with minimal key, the pointer to corresponding value is returned
427 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
428 It means that the function gets leftmost leaf of the tree and tries to unlink it.
429 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
430 So, the function returns the item with minimum key at the moment of tree traversing.
432 RCU \p synchronize method can be called. RCU should NOT be locked.
433 The function does not free the item.
434 The deallocator will be implicitly invoked when the returned object is destroyed or when
435 its \p release() member function is called.
437 template <typename Func>
438 exempt_ptr extract_min( Func f )
440 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
443 /// Extracts the minimal key and corresponding value
445 This function is a shortcut for the following call:
448 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
450 \p key_type should be copy-assignable. The copy of minimal key
451 is returned in \p min_key argument.
// SFINAE: this overload exists only when key_type is copy-assignable.
453 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
454 extract_min_key( key_type& min_key )
456 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
459 /// Extracts a value with maximal key from the tree
461 Returns \p exempt_ptr pointer to the rightmost item.
462 If the set is empty, returns empty \p exempt_ptr.
464 Note that the function returns only the value for maximal key.
465 To retrieve its key use \p extract_max( Func ) member function.
467 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
468 It means that the function gets rightmost leaf of the tree and tries to unlink it.
469 During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
470 So, the function returns the item with maximum key at the moment of tree traversing.
472 RCU \p synchronize method can be called. RCU should NOT be locked.
473 The function does not free the item.
474 The deallocator will be implicitly invoked when the returned object is destroyed or when
475 its \p release() is called.
477 exempt_ptr extract_max()
479 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
482 /// Extracts the maximal key and corresponding value
484 Returns \p exempt_ptr pointer to the rightmost item.
485 If the set is empty, returns empty \p exempt_ptr.
487 \p Func functor is used to store maximal key.
488 \p Func has the following signature:
491 void operator()( key_type const& key );
494 If the tree is empty, \p f is not called.
495 Otherwise, it is called with maximal key, the pointer to corresponding value is returned
498 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
499 It means that the function gets rightmost leaf of the tree and tries to unlink it.
500 During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
501 So, the function returns the item with maximum key at the moment of tree traversing.
503 RCU \p synchronize method can be called. RCU should NOT be locked.
504 The function does not free the item.
505 The deallocator will be implicitly invoked when the returned object is destroyed or when
506 its \p release() is called.
508 template <typename Func>
509 exempt_ptr extract_max( Func f )
511 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
514 /// Extracts the maximal key and corresponding value
516 This function is a shortcut for the following call:
519 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
521 \p key_type should be copy-assignable. The copy of maximal key
522 is returned in \p max_key argument.
524 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
525 extract_max_key( key_type& max_key )
527 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
530 /// Extracts an item from the map
532 The function searches an item with key equal to \p key in the tree,
533 unlinks it, and returns \p exempt_ptr pointer to a value found.
534 If \p key is not found the function returns an empty \p exempt_ptr.
536 RCU \p synchronize method can be called. RCU should NOT be locked.
537 The function does not destroy the value found.
538 The disposer will be implicitly invoked when the returned object is destroyed or when
539 its \p release() member function is called.
541 template <typename Q>
542 exempt_ptr extract( Q const& key )
544 return exempt_ptr(do_extract( key ));
548 /// Extracts an item from the map using \p pred for searching
550 The function is an analog of \p extract(Q const&)
551 but \p pred is used for key compare.
552 \p Less has the interface like \p std::less.
553 \p pred must imply the same element order as the comparator used for building the tree.
555 template <typename Q, typename Less>
556 exempt_ptr extract_with( Q const& key, Less pred )
558 return exempt_ptr(do_extract_with( key, pred ));
561 /// Find the key \p key
563 The function searches the item with key equal to \p key and calls the functor \p f for item found.
564 The interface of \p Func functor is:
567 void operator()( key_type const& key, mapped_type& item );
570 where \p item is the item found.
571 The functor is called under node-level lock.
573 The function applies RCU lock internally.
575 The function returns \p true if \p key is found, \p false otherwise.
577 template <typename K, typename Func>
578 bool find( K const& key, Func f )
580 return do_find( key, key_comparator(),
581 [&f]( node_type * pNode ) -> bool {
582 assert( pNode != nullptr );
583 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
585 f( pNode->m_key, *pVal );
593 /// Finds the key \p val using \p pred predicate for searching
595 The function is an analog of \p find(K const&, Func)
596 but \p pred is used for key comparing.
597 \p Less functor has the interface like \p std::less.
598 \p Less must imply the same element order as the comparator used for building the map.
600 template <typename K, typename Less, typename Func>
601 bool find_with( K const& key, Less pred, Func f )
604 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
605 [&f]( node_type * pNode ) -> bool {
606 assert( pNode != nullptr );
607 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
609 f( pNode->m_key, *pVal );
617 /// Checks whether the map contains \p key
619 The function searches the item with key equal to \p key
620 and returns \p true if it is found, and \p false otherwise.
622 The function applies RCU lock internally.
624 template <typename K>
625 bool contains( K const& key )
627 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
630 /// Checks whether the map contains \p key using \p pred predicate for searching
632 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
633 \p Less functor has the interface like \p std::less.
634 \p Less must imply the same element order as the comparator used for building the set.
636 template <typename K, typename Less>
637 bool contains( K const& key, Less pred )
640 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
643 /// Clears the tree (thread safe, not atomic)
645 The function unlinks all items from the tree.
646 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
650 assert( set.empty() );
652 the assertion could be raised.
654 For each node the \ref disposer will be called after unlinking.
656 RCU \p synchronize method can be called. RCU should not be locked.
// Clear by repeatedly extracting the minimum until the tree is empty.
660 while ( extract_min() );
663 /// Clears the tree (not thread safe)
665 This function is not thread safe and may be called only when no other thread deals with the tree.
666 The function is used in the tree destructor.
670 clear(); // temp solution
674 /// Checks if the map is empty
// The sentinel root's right child is the real tree root; null means empty.
677 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
680 /// Returns item count in the map
682 Only leaf nodes containing user data are counted.
684 The value returned depends on item counter type provided by \p Traits template parameter.
685 If it is \p atomicity::empty_item_counter this function always returns 0.
687 The function is not suitable for checking the tree emptiness, use \p empty()
688 member function for this purpose.
692 return m_ItemCounter;
695 /// Returns const reference to internal statistics
696 stat const& statistics() const
701 /// Returns reference to \p sync_monitor object
702 sync_monitor& monitor()
707 sync_monitor const& monitor() const
713 /// Checks internal consistency (not atomic, not thread-safe)
715 The debugging function to check internal consistency of the tree.
717 bool check_consistency() const
// Delegates to the functor overload with a no-op violation handler.
719 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
722 /// Checks internal consistency (not atomic, not thread-safe)
724 The debugging function to check internal consistency of the tree.
725 The functor \p Func is called if a violation of internal tree structure
729 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
733 - \p nLevel - the level where the violation is found
734 - \p hLeft - the height of left subtree
735 - \p hRight - the height of right subtree
737 The functor is called for each violation found.
739 template <typename Func>
740 bool check_consistency( Func f ) const
742 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
745 do_check_consistency( pChild, 1, f, nErrors );
753 template <typename Func>
// Recursive checker: verifies key ordering and AVL balance, returning the
// subtree height; f is invoked and nErrors bumped for every violation.
754 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
758 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
759 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
// Key-order violations: left key must not exceed the node's key,
// and the node's key must not exceed the right key.
760 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
762 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
765 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
766 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
// AVL invariant: sibling subtree heights may differ by at most 1.
768 if ( hLeft >= hRight ) {
769 if ( hLeft - hRight > 1 ) {
770 f( nLevel, hLeft, hRight );
776 if ( hRight - hLeft > 1 ) {
777 f( nLevel, hLeft, hRight );
786 template <typename Q, typename Compare, typename Func>
// Lookup driver: repeats try_find() until it no longer reports retry.
787 bool do_find( Q& key, Compare cmp, Func f ) const
792 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
794 assert( result != find_result::retry );
795 return result == find_result::found;
798 template <typename K, typename Compare, typename Func>
// Update driver: removed_list retires unlinked nodes/values when it goes
// out of scope, after the RCU read section has ended.
799 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
801 check_deadlock_policy::check();
803 rcu_disposer removed_list;
806 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
810 template <typename K, typename Compare, typename Func>
811 bool do_remove( K const& key, Compare cmp, Func func )
813 // Func must return true if the value was disposed
814 // or false if the value was extracted
816 check_deadlock_policy::check();
818 rcu_disposer removed_list;
821 return try_remove_root( key, cmp, func, removed_list );
825 template <typename Func>
// Extracts the leftmost value; f receives its key. Returns nullptr if the tree is empty.
826 mapped_type do_extract_min( Func f )
828 mapped_type pExtracted = nullptr;
831 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
836 template <typename Func>
// Extracts the rightmost value; f receives its key. Returns nullptr if the tree is empty.
837 mapped_type do_extract_max( Func f )
839 mapped_type pExtracted = nullptr;
842 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
847 template <typename Func>
// Common min/max extraction loop; nDir selects the traversal direction.
848 void do_extract_minmax( int nDir, Func func )
850 check_deadlock_policy::check();
852 rcu_disposer removed_list;
859 // get right child of root
860 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
862 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
863 if ( nChildVersion & node_type::shrinking ) {
// The subtree root is being shrunk by a concurrent rotation: wait, then retry.
864 m_stat.onRemoveRootWaitShrinking();
865 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
866 result = update_flags::retry;
868 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
869 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
872 result = update_flags::retry;
877 if ( result == update_flags::retry )
878 m_stat.onRemoveRetry();
880 m_stat.onExtract( result == update_flags::result_removed );
887 template <typename Q>
888 mapped_type do_extract( Q const& key )
890 mapped_type pExtracted = nullptr;
// Functor returns false: the value is extracted by the caller, not disposed.
894 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
896 m_stat.onExtract( pExtracted != nullptr );
900 template <typename Q, typename Less>
901 mapped_type do_extract_with( Q const& key, Less pred )
904 mapped_type pExtracted = nullptr;
907 cds::opt::details::make_comparator_from_less<Less>(),
908 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
910 m_stat.onExtract( pExtracted != nullptr );
// Node height accessors (height_null treats a null node as height 0).
917 static int height( node_type * pNode, atomics::memory_order order )
920 return pNode->m_nHeight.load( order );
922 static void set_height( node_type * pNode, int h, atomics::memory_order order )
925 pNode->m_nHeight.store( h, order );
927 static int height_null( node_type * pNode, atomics::memory_order order )
929 return pNode ? height( pNode, order ) : 0;
// Fixed depth of the explicit traversal stacks in try_find/try_update/try_remove.
932 static CDS_CONSTEXPR int const c_stackSize = 64;
934 template <typename Q, typename Compare, typename Func>
// Optimistic hand-over-hand search (Bronson et al. style): descends using
// per-node version numbers instead of read locks; a version change observed
// at any step invalidates the traversal and forces a retry.
935 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
937 assert( gc::is_locked() );
// Explicit stack replaces recursion: each record holds the node, the version
// observed on entry, and the direction taken from that node.
943 version_type nVersion;
947 stack_record stack[c_stackSize];
949 stack[0].pNode = pNode;
950 stack[0].nVersion = nVersion;
951 stack[0].nDir = nDir;
954 pNode = stack[pos].pNode;
955 nVersion = stack[pos].nVersion;
956 nDir = stack[pos].nDir;
959 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
// No child: either the node changed underneath us (retry) or the key is absent.
961 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
963 m_stat.onFindRetry();
966 m_stat.onFindFailed();
967 return find_result::not_found;
970 int nCmp = cmp( key, pChild->m_key );
// Key matched: confirm under the node-level lock that the link and the
// value are both still present before reporting success.
972 if ( pChild->is_valued( memory_model::memory_order_acquire ) ) {
974 node_scoped_lock l( m_Monitor, *pChild );
975 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
976 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
978 m_stat.onFindSuccess();
979 return find_result::found;
984 m_stat.onFindRetry();
988 m_stat.onFindFailed();
989 return find_result::not_found;
// Descend: if the child is being shrunk by a concurrent rotation, wait for
// the shrink to complete, then re-validate the parent version.
992 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
993 if ( nChildVersion & node_type::shrinking ) {
994 m_stat.onFindWaitShrinking();
995 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
997 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
999 m_stat.onFindRetry();
1003 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1005 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1007 m_stat.onFindRetry();
// Push the child and continue one level deeper.
1012 assert(pos < c_stackSize);
1013 stack[pos].pNode = pChild;
1014 stack[pos].nVersion = nChildVersion;
1015 stack[pos].nDir = nCmp;
1016 break; // child iteration
1019 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1021 m_stat.onFindRetry();
1025 m_stat.onFindRetry();
1028 return find_result::retry;
1031 template <typename K, typename Compare, typename Func>
// Entry point for insert/update: dispatches on whether the tree (the right
// child of the sentinel root) is empty, retrying on concurrent changes.
1032 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1034 assert( gc::is_locked() );
1039 // get right child of root
1040 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1042 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1043 if ( nChildVersion & node_type::shrinking ) {
1044 m_stat.onUpdateRootWaitShrinking();
1045 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1046 result = update_flags::retry;
1048 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1049 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1051 result = update_flags::retry;
1054 // the tree is empty
1055 if ( nFlags & update_flags::allow_insert ) {
1056 // insert into tree as right child of the root
1058 node_scoped_lock l( m_Monitor, *m_pRoot );
// Re-check emptiness under the root lock: a concurrent insert may have won.
1059 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1060 result = update_flags::retry;
1064 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1065 mapped_type pVal = funcUpdate( pNew );
1066 assert( pVal != nullptr );
// Store the value before linking the node so readers never observe a
// linked-but-valueless node.
1067 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1069 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1070 set_height( m_pRoot, 2, memory_model::memory_order_release );
1074 m_stat.onInsertSuccess();
1075 return update_flags::result_inserted;
1078 return update_flags::failed;
1081 if ( result != update_flags::retry )
1086 template <typename K, typename Compare, typename Func>
// Entry point for removal: retries from the root until a definite result.
1087 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1089 assert( gc::is_locked() );
1094 // get right child of root
1095 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1097 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1098 if ( nChildVersion & node_type::shrinking ) {
1099 m_stat.onRemoveRootWaitShrinking();
1100 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1101 result = update_flags::retry;
1103 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1104 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1107 result = update_flags::retry;
1112 if ( result == update_flags::retry )
1113 m_stat.onRemoveRetry();
1115 m_stat.onRemove( result == update_flags::result_removed );
1116 return result == update_flags::result_removed;
1121 template <typename K, typename Compare, typename Func>
// Iterative descent for insert/update using an explicit stack (see try_find
// for the version-validation protocol).
1122 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1124 assert( gc::is_locked() );
1125 assert( nVersion != node_type::unlinked );
1130 version_type nVersion;
1133 stack_record stack[c_stackSize];
1135 stack[0].pNode = pNode;
1136 stack[0].nVersion = nVersion;
1138 while ( pos >= 0 ) {
1139 pNode = stack[pos].pNode;
1140 nVersion = stack[pos].nVersion;
1142 int nCmp = cmp( key, pNode->m_key );
// Key matches this node: attempt the in-place update.
1144 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1145 if ( result != update_flags::retry )
1148 m_stat.onUpdateRetry();
1153 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1154 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1156 m_stat.onUpdateRetry();
// No child in the search direction: insert here if allowed, otherwise fail.
1160 if ( pChild == nullptr ) {
1162 if ( nFlags & update_flags::allow_insert ) {
1163 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1164 if ( result != update_flags::retry )
1167 m_stat.onUpdateRetry();
1171 return update_flags::failed;
1175 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1176 if ( nChildVersion & node_type::shrinking ) {
1177 m_stat.onUpdateWaitShrinking();
1178 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1181 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1182 // this second read is important, because it is protected by nChildVersion
1184 // validate the read that our caller took to get to node
1185 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1187 m_stat.onUpdateRetry();
1190 // At this point we know that the traversal our parent took to get to node is still valid.
1191 // The recursive implementation will validate the traversal from node to
1192 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1193 // This means that we are no longer vulnerable to node shrinks, and we don't need
1194 // to validate node version any more.
1196 assert( pos < c_stackSize );
1197 stack[pos].pNode = pChild;
1198 stack[pos].nVersion = nChildVersion;
1199 assert( nChildVersion != node_type::unlinked );
1200 break; // child iteration
1202 m_stat.onUpdateRetry();
1206 return update_flags::retry;
// Iterative removal of \p key from the subtree rooted at \p pNode (whose parent is \p pParent).
// \p func is invoked with the key and the removed value (it is expected to arrange disposal,
// see try_remove_node). Returns update_flags::result_removed / failed, or update_flags::retry
// when the optimistic traversal was invalidated and the caller must restart.
// Requires: the RCU read-side lock is held (asserted below).
// The structure mirrors try_update(): explicit descent stack + version validation.
1209 template <typename K, typename Compare, typename Func>
1210 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1212 assert( gc::is_locked() );
1213 assert( nVersion != node_type::unlinked );
1217 node_type * pParent;
1219 version_type nVersion;
1222 stack_record stack[c_stackSize];
// Seed the descent stack with the caller-supplied (parent, node, version) triple.
1224 stack[0].pParent = pParent;
1225 stack[0].pNode = pNode;
1226 stack[0].nVersion = nVersion;
1228 while ( pos >= 0 ) {
1229 pParent = stack[pos].pParent;
1230 pNode = stack[pos].pNode;
1231 nVersion = stack[pos].nVersion;
// Key found at this node: attempt the actual removal under locks.
1233 int nCmp = cmp( key, pNode->m_key );
1235 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1236 if ( result != update_flags::retry )
1239 m_stat.onRemoveRetry();
// Optimistic child read followed by re-validation of pNode's version.
1244 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1245 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1247 m_stat.onRemoveRetry();
// No child in the search direction: the key is not present.
1251 if ( pChild == nullptr )
1252 return update_flags::failed;
// Wait out a concurrent shrink (rotation) on the child before descending.
1255 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1256 if ( nChildVersion & node_type::shrinking ) {
1257 m_stat.onRemoveWaitShrinking();
1258 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1261 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1262 // this second read is important, because it is protected by nChildVersion
1264 // validate the read that our caller took to get to node
1265 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1267 m_stat.onRemoveRetry();
1271 // At this point we know that the traversal our parent took to get to node is still valid.
1272 // The recursive implementation will validate the traversal from node to
1273 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1274 // This means that we are no longer vulnerable to node shrinks, and we don't need
1275 // to validate node version any more.
// Descend: current node becomes the parent, validated child becomes current.
1277 assert( pos < c_stackSize );
1278 stack[pos].pParent = pNode;
1279 stack[pos].pNode = pChild;
1280 stack[pos].nVersion = nChildVersion;
1281 break; // child iteration
1283 m_stat.onRemoveRetry();
// Stack exhausted without a decision — caller must restart the operation.
1286 return update_flags::retry;
// Extracts the extreme (minimum or maximum) element of the subtree rooted at \p pNode.
// \p nDir selects the walk direction (presumably left_child for min, right_child for max —
// the callers are outside this excerpt; confirm against them). The walk keeps descending in
// \p nDir until there is no further child; that node is the extremum and is removed via
// try_remove_node() with \p func applied to its key/value.
// Returns an update_flags result, or update_flags::retry when validation failed.
// Requires: the RCU read-side lock is held (asserted below).
1289 template <typename Func>
1290 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1292 assert( gc::is_locked() );
1293 assert( nVersion != node_type::unlinked );
1297 node_type * pParent;
1299 version_type nVersion;
1302 stack_record stack[c_stackSize];
// Seed the descent stack with the caller-supplied (parent, node, version) triple.
1304 stack[0].pParent = pParent;
1305 stack[0].pNode = pNode;
1306 stack[0].nVersion = nVersion;
1308 while ( pos >= 0 ) {
1309 pParent = stack[pos].pParent;
1310 pNode = stack[pos].pNode;
1311 nVersion = stack[pos].nVersion;
// Optimistic child read in the fixed direction, then version re-validation.
1314 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
1315 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1317 m_stat.onRemoveRetry();
// No further child in nDir: pNode is the extremum — remove it.
// A routing (value-less) node cannot be the extremum here, hence the assert.
1321 if ( pChild == nullptr ) {
1323 assert(pNode->is_valued( memory_model::memory_order_acquire ));
1324 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1325 if ( result != update_flags::retry )
1328 m_stat.onRemoveRetry();
// Wait out a concurrent shrink (rotation) on the child before descending.
1332 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1333 if ( nChildVersion & node_type::shrinking ) {
1334 m_stat.onRemoveWaitShrinking();
1335 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1338 else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
1339 // this second read is important, because it is protected by nChildVersion
1341 // validate the read that our caller took to get to node
1342 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1344 m_stat.onRemoveRetry();
1348 // At this point we know that the traversal our parent took to get to node is still valid.
1349 // The recursive implementation will validate the traversal from node to
1350 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1351 // This means that we are no longer vulnerable to node shrinks, and we don't need
1352 // to validate node version any more.
// Descend: current node becomes the parent, validated child becomes current.
1354 assert( pos < c_stackSize );
1355 stack[pos].pParent = pNode;
1356 stack[pos].pNode = pChild;
1357 stack[pos].nVersion = nChildVersion;
1358 break; // child iteration
1360 m_stat.onRemoveRetry();
// Stack exhausted without a decision — caller must restart the operation.
1363 return update_flags::retry;
// Inserts a new leaf for \p key as the \p nDir child of \p pNode. \p funcUpdate receives the
// freshly allocated node and must return the (non-null) value pointer to store in it.
// \p nVersion is the version of \p pNode that the caller validated against.
// Returns update_flags::result_inserted on success, or update_flags::retry if pNode changed
// (version mismatch or the child slot became occupied) and the caller must restart.
//
// Two allocation strategies, chosen by the compile-time flag c_bRelaxedInsert:
//  - relaxed: allocate and fill the node BEFORE taking the lock (optimistic — may be wasted);
//  - strict:  allocate only after the slot has been re-validated under the lock.
1366 template <typename K, typename Func>
1367 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Helper: let the user functor produce the value and publish it into the new node.
1371 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1372 mapped_type pVal = funcUpdate( pNew );
1373 assert( pVal != nullptr );
1374 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
// Relaxed path: cheap pre-check before allocating, to avoid useless allocations.
1377 if ( c_bRelaxedInsert ) {
1378 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1379 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1381 m_stat.onInsertRetry();
1382 return update_flags::retry;
1385 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1388 node_type * pDamaged;
// Lock the parent and re-validate version + empty child slot under the lock.
1390 assert( pNode != nullptr );
1391 node_scoped_lock l( m_Monitor, *pNode );
1393 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1394 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
// Validation failed after a relaxed pre-allocation: detach the value from the
// speculative node before retrying (the node/value disposal lines are elided here).
1396 if ( c_bRelaxedInsert ) {
1397 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1398 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1401 m_stat.onRelaxedInsertFailed();
1404 m_stat.onInsertRetry();
1405 return update_flags::retry;
// Strict path: allocate only now, with the slot guaranteed free under the lock.
1408 if ( !c_bRelaxedInsert )
1409 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Publish the link, then propagate height changes upward.
1411 pNode->child( pNew, nDir, memory_model::memory_order_release );
1412 pDamaged = fix_height_locked( pNode );
1416 m_stat.onInsertSuccess();
// Rebalance outside the parent's lock if fix_height reported a damaged ancestor.
1419 fix_height_and_rebalance( pDamaged, disp );
1420 m_stat.onInsertRebalanceRequired();
1423 return update_flags::result_inserted;
// Applies an in-place value update to \p pNode under the node's lock.
// \p nFlags is a mask of update_flags; \p funcUpdate produces the replacement value.
// Returns:
//  - update_flags::retry          — version changed or the node was unlinked concurrently;
//  - update_flags::failed         — node already holds a value and allow_update is not set;
//  - update_flags::result_inserted — node was a routing node (null value) and got a value;
//  - update_flags::result_updated  — an existing value was replaced (old value goes to disp).
1426 template <typename Func>
1427 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// Lock the node, then re-validate everything the caller observed optimistically.
1431 assert( pNode != nullptr );
1433 node_scoped_lock l( m_Monitor, *pNode );
1435 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1436 return update_flags::retry;
1438 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
1439 m_stat.onUpdateUnlinked();
1440 return update_flags::retry;
// "insert"-only semantics: refuse to overwrite an existing value.
1443 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
1444 m_stat.onInsertFailed();
1445 return update_flags::failed;
// pOld == nullptr means the node was a routing node, i.e. this is an insertion.
1448 pOld = pNode->value( memory_model::memory_order_relaxed );
1449 bInserted = pOld == nullptr;
1450 mapped_type pVal = funcUpdate( pNode );
1454 assert( pVal != nullptr );
1455 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
// The replaced value is retired through the RCU disposer, not freed directly.
1460 disp.dispose_value(pOld);
1461 m_stat.onDisposeValue();
1465 m_stat.onInsertSuccess();
1466 return update_flags::result_inserted;
1469 m_stat.onUpdateSuccess();
1470 return update_flags::result_updated;
// Removes the value of \p pNode (child of \p pParent), using one of two strategies:
//  1. If pNode has at most one child, it is physically unlinked: lock parent then node
//     (lock order matters), re-validate, splice the node out, and fix heights.
//  2. If pNode has two children, it cannot be unlinked without restructuring, so it is
//     turned into a "routing" node: its value pointer is set to nullptr and it stays in
//     the tree as an internal key-only node.
// \p func is called with (key, old value, disp); its return value tells whether the value
// was handed to the disposer (true) or extracted to the caller (false).
// Returns update_flags::result_removed / failed / retry.
1473 template <typename Func>
1474 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1476 assert( pParent != nullptr );
1477 assert( pNode != nullptr );
// Routing nodes carry no value; nothing to remove.
1479 if ( !pNode->is_valued( memory_model::memory_order_acquire ) )
1480 return update_flags::failed;
1482 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1483 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1485 // pNode can be replaced with its child
1487 node_type * pDamaged;
// Lock parent first, then node; re-validate the parent/child relationship and
// the node version under the locks before touching any links.
1490 node_scoped_lock lp( m_Monitor, *pParent );
1491 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1492 return update_flags::retry;
1495 node_scoped_lock ln( m_Monitor, *pNode );
1496 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1497 return update_flags::retry;
1499 pOld = pNode->value( memory_model::memory_order_relaxed );
1501 return update_flags::failed;
1503 if ( !try_unlink_locked( pParent, pNode, disp ))
1504 return update_flags::retry;
1506 pDamaged = fix_height_locked( pParent );
// Value disposal is delegated to the functor (extract vs. erase semantics).
1510 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1511 m_stat.onDisposeValue();
1513 m_stat.onExtractValue();
// Rebalance outside the parent's lock if unlinking damaged ancestor heights.
1516 fix_height_and_rebalance( pDamaged, disp );
1517 m_stat.onRemoveRebalanceRequired();
1521 // pNode is an internal with two children
// Strategy 2: demote to a routing node — only pNode's own lock is needed
// because no links change, only the value pointer.
1525 node_scoped_lock ln( m_Monitor, *pNode );
1526 pOld = pNode->value( memory_model::memory_order_relaxed );
1527 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1528 return update_flags::retry;
1530 return update_flags::failed;
1532 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1533 m_stat.onMakeRoutingNode();
1537 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1538 m_stat.onDisposeValue();
1540 m_stat.onExtractValue();
1542 return update_flags::result_removed;
// Physically splices \p pNode (which has at most one child) out of the tree.
// Preconditions: both pParent and pNode are locked by the caller; pParent is not unlinked.
// Returns false (caller must retry) if the parent/child relationship changed, or if pNode
// meanwhile acquired two children (splicing impossible).
// On success: the single child (or nullptr) replaces pNode under pParent, pNode's version
// is stamped 'unlinked', its value pointer is cleared (the caller disposes the value), and
// the node itself is retired through the RCU disposer.
1545 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1547 // pParent and pNode must be locked
1548 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
// Re-check under the locks that pNode is still a child of pParent.
1550 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1551 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1552 if ( pNode != pParentLeft && pNode != pParentRight ) {
1553 // node is no longer a child of parent
1557 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1558 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
// A node with two children cannot be spliced out — caller falls back to retry.
1560 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1561 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1562 if ( pLeft != nullptr && pRight != nullptr ) {
1563 // splicing is no longer possible
// Splice: hook pNode's only child (possibly null) into pParent's matching slot.
1566 node_type * pSplice = pLeft ? pLeft : pRight;
1568 if ( pParentLeft == pNode )
1569 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1571 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
1574 pSplice->parent( pParent, memory_model::memory_order_release );
1576 // Mark the node as unlinked
1577 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1579 // The value will be disposed by calling function
1580 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
// Node memory is reclaimed only after an RCU grace period, so concurrent
// readers still traversing it stay safe.
1582 disp.dispose( pNode );
1583 m_stat.onDisposeNode();
1590 private: // rotations
// Classifies what maintenance \p pNode needs, without taking any lock:
//  - unlink_required     — value-less (routing) node with at most one child: splice it out;
//  - rebalance_required  — AVL balance factor outside [-1, +1]: rotation needed;
//  - nothing_required    — stored height already correct and balanced;
//  - otherwise returns the new height value to store (stored height is stale).
1592 int estimate_node_condition( node_type * pNode )
1594 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1595 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
// A routing node that lost a child can (and should) be unlinked.
1597 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1598 return unlink_required;
// height_null() treats a null child as height 0.
1600 int h = height( pNode, memory_model::memory_order_acquire );
1601 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1602 int hR = height_null( pRight, memory_model::memory_order_acquire );
1604 int hNew = 1 + std::max( hL, hR );
1605 int nBalance = hL - hR;
1607 if ( nBalance < -1 || nBalance > 1 )
1608 return rebalance_required;
1610 return h != hNew ? hNew : nothing_required;
1613 node_type * fix_height( node_type * pNode )
1615 assert( pNode != nullptr );
1616 node_scoped_lock l( m_Monitor, *pNode );
1617 return fix_height_locked( pNode );
// Recomputes \p pNode's stored height while the caller holds the node's lock.
// Based on estimate_node_condition():
//  - rebalance_required / unlink_required — structural repair needed (handled by caller);
//  - nothing_required                     — no change;
//  - otherwise                            — store the corrected height and return the
//                                           parent, which may now be damaged in turn.
// NOTE(review): the dispatch lines between the estimate and the cases are elided in this
// excerpt; it reads as a switch over the estimate result — confirm against the full source.
1620 node_type * fix_height_locked( node_type * pNode )
1622 // pNode must be locked!!!
1623 int h = estimate_node_condition( pNode );
1625 case rebalance_required:
1626 case unlink_required:
1628 case nothing_required:
// Publish the corrected height, then hand the (possibly damaged) parent back.
1631 set_height( pNode, h, memory_model::memory_order_release );
1632 return parent( pNode, memory_model::memory_order_relaxed );
// Walks from \p pNode toward the root, repairing stale heights and performing rotations.
// Loop invariant: pNode is a damaged (or possibly damaged) node; iteration stops at the
// root sentinel (node with no parent) or when nothing more needs fixing.
// Removed nodes are retired through \p disp (RCU-deferred disposal).
1636 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1638 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
1639 int nCond = estimate_node_condition( pNode );
1640 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ) )
// Simple height staleness: fix height only, then continue with the parent.
1643 if ( nCond != unlink_required && nCond != rebalance_required )
1644 pNode = fix_height( pNode );
// Structural repair: lock parent then node (consistent lock order), re-validate
// the relationship under the locks, and rebalance. rebalance_locked() returns
// the next damaged node to continue from.
1646 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1647 assert( pParent != nullptr );
1649 node_scoped_lock lp( m_Monitor, *pParent );
1650 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1651 node_scoped_lock ln( m_Monitor, *pNode );
1652 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs \p pNode while both \p pParent and \p pNode are locked by the caller.
// Chooses between: unlinking a childless routing node, a right rotation (left-heavy),
// a left rotation (right-heavy), or a plain height fix.
// Returns the next damaged node to process, or nullptr when rebalancing is complete.
1659 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1661 // pParent and pNode should be locked.
1662 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1663 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1665 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1666 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Routing node with a missing child: splice it out instead of rotating.
1668 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1669 if ( try_unlink_locked( pParent, pNode, disp ))
1670 return fix_height_locked( pParent );
1672 // retry needed for pNode
1677 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1678 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// Standard AVL balance computation: positive balance = left-heavy.
1680 int h = height( pNode, memory_model::memory_order_acquire );
1681 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1682 int hR = height_null( pRight, memory_model::memory_order_acquire );
1683 int hNew = 1 + std::max( hL, hR );
1684 int balance = hL - hR;
1687 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1688 else if ( balance < -1 )
1689 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1690 else if ( hNew != h ) {
// Balanced but stale height: publish it and bubble the damage to the parent.
1691 set_height( pNode, hNew, memory_model::memory_order_release );
1693 // pParent is already locked
1694 return fix_height_locked( pParent );
// pNode is left-heavy: decide between a single right rotation and a left-right double
// rotation, re-reading heights under pLeft's lock to make the decision sound.
// pParent and pNode are locked by the caller; this function additionally locks pLeft
// (and possibly pLeft->right). Returns the next damaged node, or pNode to signal retry.
1700 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1702 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1703 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1704 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1706 // pParent and pNode is locked yet
1707 // pNode->pLeft is too large, we will rotate-right.
1708 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
// Lock pLeft and re-validate that it is still pNode's left child.
1711 assert( pLeft != nullptr );
1712 node_scoped_lock l( m_Monitor, *pLeft );
1713 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1714 return pNode; // retry for pNode
// Re-read heights under the lock — the unlocked estimates may be stale.
1716 int hL = height( pLeft, memory_model::memory_order_acquire );
1718 return pNode; // retry
1720 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1721 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1722 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1723 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
// Left-left case: a single right rotation suffices.
1727 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// Left-right case: lock the grandchild and re-validate before the double rotation.
1730 assert( pLRight != nullptr );
1732 node_scoped_lock lr( m_Monitor, *pLRight );
1733 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1734 return pNode; // retry
1736 hLR = height( pLRight, memory_model::memory_order_acquire );
1738 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1740 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
1741 int balance = hLL - hLRL;
1742 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1743 // nParent.child.left won't be damaged after a double rotation
1744 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1748 // focus on pLeft, if necessary pNode will be balanced later
// pLeft itself is too unbalanced for a clean double rotation: fix it first.
1749 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked(): pNode is right-heavy, decide between a single
// left rotation and a right-left double rotation, re-reading heights under pRight's lock.
// pParent and pNode are locked by the caller; this function additionally locks pRight
// (and possibly pRight->left). Returns the next damaged node, or pNode to signal retry.
1754 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1756 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1757 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1758 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1760 // pParent and pNode is locked yet
// Lock pRight and re-validate that it is still pNode's right child.
1762 assert( pRight != nullptr );
1763 node_scoped_lock l( m_Monitor, *pRight );
1764 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1765 return pNode; // retry for pNode
// Re-read heights under the lock; balance restored concurrently means retry.
1767 int hR = height( pRight, memory_model::memory_order_acquire );
1768 if ( hL - hR >= -1 )
1769 return pNode; // retry
1771 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1772 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1773 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1774 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
// Right-right case: a single left rotation suffices.
1776 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// Right-left case: lock the grandchild and re-validate before the double rotation.
1779 assert( pRLeft != nullptr );
1780 node_scoped_lock lrl( m_Monitor, *pRLeft );
1781 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1782 return pNode; // retry
1784 hRL = height( pRLeft, memory_model::memory_order_acquire );
1786 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1788 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1789 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1790 int balance = hRR - hRLR;
1791 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1792 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// pRight itself is too unbalanced for a clean double rotation: fix it first.
1794 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1798 static void begin_change( node_type * pNode, version_type version )
1800 assert(pNode->version(memory_model::memory_order_acquire) == version );
1801 assert( (version & node_type::shrinking) == 0 );
1802 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1804 static void end_change( node_type * pNode, version_type version )
1806 // Clear shrinking and unlinked flags and increment version
1807 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the subtree root, pNode becomes its right child,
// and pLRight (pLeft's former right child) becomes pNode's left child.
// Caller holds locks on pParent, pNode, and pLeft. The rotation is bracketed by
// begin_change()/end_change() on pNode so optimistic readers retry, and each rewiring
// step is separated by release fences to keep the structure traversable at every point.
// Returns the next damaged node to repair (possibly pNode, pLeft, or the fixed parent).
1810 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1812 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1813 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Mark pNode shrinking: its subtree is about to lose height/shape.
1815 begin_change( pNode, nodeVersion );
// Step 1: move pLRight under pNode.
1817 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1818 if ( pLRight != nullptr )
1819 pLRight->parent( pNode, memory_model::memory_order_release );
1821 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 2: hang pNode under pLeft.
1823 pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
1824 pNode->parent( pLeft, memory_model::memory_order_release );
1826 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 3: replace pNode with pLeft in pParent's correct child slot.
1828 if ( pParentLeft == pNode )
1829 pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
1831 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1832 pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
1834 pLeft->parent( pParent, memory_model::memory_order_release );
1836 atomics::atomic_thread_fence( memory_model::memory_order_release );
1838 // fix up heights links
1839 int hNode = 1 + std::max( hLR, hR );
1840 set_height( pNode, hNode, memory_model::memory_order_release );
1841 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
// Unblock readers: publish the post-rotation version of pNode.
1843 end_change( pNode, nodeVersion );
1844 m_stat.onRotateRight();
1846 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1847 // parent.child). pNode is the deepest. Perform as many fixes as we can
1848 // with the locks we've got.
1850 // We've already fixed the height for pNode, but it might still be outside
1851 // our allowable balance range. In that case a simple fix_height_locked()
1853 int nodeBalance = hLR - hR;
1854 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1855 // we need another rotation at pNode
1856 m_stat.onRotateAfterRightRotation();
1860 // we've fixed balance and height damage for pNode, now handle
1861 // extra-routing node damage
1862 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1863 // we need to remove pNode and then repair
1864 m_stat.onRemoveAfterRightRotation();
1868 // we've already fixed the height at pLeft, do we need a rotation here?
1869 int leftBalance = hLL - hNode;
1870 if ( leftBalance < -1 || leftBalance > 1 ) {
1871 m_stat.onRotateAfterRightRotation();
1875 // pLeft might also have routing node damage (if pLeft.left was null)
1876 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire) ) {
1877 m_stat.onDamageAfterRightRotation();
1881 // try to fix the parent height while we've still got the lock
1882 return fix_height_locked( pParent );
// Single left rotation — mirror image of rotate_right_locked(): pRight becomes the
// subtree root, pNode becomes its left child, pRLeft becomes pNode's right child.
// Caller holds locks on pParent, pNode, and pRight; the rewiring is bracketed by
// begin_change()/end_change() on pNode with release fences between the steps.
// Returns the next damaged node to repair.
1885 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1887 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1888 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1890 begin_change( pNode, nodeVersion );
1892 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
// Step 1: move pRLeft under pNode.
1893 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1894 if ( pRLeft != nullptr )
1895 pRLeft->parent( pNode, memory_model::memory_order_release );
1897 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 2: hang pNode under pRight.
1899 pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
1900 pNode->parent( pRight, memory_model::memory_order_release );
1902 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 3: replace pNode with pRight in pParent's correct child slot.
1904 if ( pParentLeft == pNode )
1905 pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
1907 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1908 pParent->m_pRight.store( pRight, memory_model::memory_order_release );
1910 pRight->parent( pParent, memory_model::memory_order_release );
1912 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Recompute heights bottom-up, then unblock readers via end_change().
1915 int hNode = 1 + std::max( hL, hRL );
1916 set_height( pNode, hNode, memory_model::memory_order_release );
1917 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
1919 end_change( pNode, nodeVersion );
1920 m_stat.onRotateLeft();
// Post-rotation damage assessment (mirrors rotate_right_locked).
1922 int nodeBalance = hRL - hL;
1923 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1924 m_stat.onRotateAfterLeftRotation();
1928 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
1929 m_stat.onRemoveAfterLeftRotation();
1933 int rightBalance = hRR - hNode;
1934 if ( rightBalance < -1 || rightBalance > 1 ) {
1935 m_stat.onRotateAfterLeftRotation();
1939 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire) ) {
1940 m_stat.onDamageAfterLeftRotation();
// Try to fix the parent's height while we still hold its lock.
1944 return fix_height_locked( pParent );
// Left-right double rotation: pLRight (pNode->left->right) is promoted to subtree root,
// with pLeft as its left child and pNode as its right child. Used when pNode is
// left-heavy but pLeft is right-heavy, so a single right rotation would not balance.
// Caller holds locks on pParent, pNode, pLeft, and pLRight. Both pNode and pLeft
// change shape, so both are bracketed by begin_change()/end_change().
// Returns the next damaged node to repair.
1947 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1949 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1950 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
1952 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1953 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
1954 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
1955 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
// Both rewired nodes enter the "shrinking" state before any link changes.
1957 begin_change( pNode, nodeVersion );
1958 begin_change( pLeft, leftVersion );
1960 // fix up pNode links, careful about the order!
// pLRight's children are redistributed: right subtree to pNode, left subtree to pLeft.
1961 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
1962 if ( pLRR != nullptr )
1963 pLRR->parent( pNode, memory_model::memory_order_release );
1965 pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
1966 if ( pLRL != nullptr )
1967 pLRL->parent( pLeft, memory_model::memory_order_release );
// pLRight adopts pLeft (left) and pNode (right) as its children.
1969 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
1970 pLeft->parent( pLRight, memory_model::memory_order_release );
1972 pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
1973 pNode->parent( pLRight, memory_model::memory_order_release );
// Replace pNode with pLRight in pParent's correct child slot.
1976 pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
1978 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1979 pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
1981 pLRight->parent( pParent, memory_model::memory_order_release );
// Heights bottom-up: pNode and pLeft first, then the new root pLRight.
1984 int hNode = 1 + std::max( hLRR, hR );
1985 set_height( pNode, hNode, memory_model::memory_order_release );
1986 int hLeft = 1 + std::max( hLL, hLRL );
1987 set_height( pLeft, hLeft, memory_model::memory_order_release );
1988 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
1990 end_change( pNode, nodeVersion );
1991 end_change( pLeft, leftVersion );
1992 m_stat.onRotateRightOverLeft();
1994 // caller should have performed only a single rotation if pLeft was going
1995 // to end up damaged
1996 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1997 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
1999 // We have damaged pParent, pLR (now parent.child), and pNode (now
2000 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2001 // can with the locks we've got.
2003 // We've already fixed the height for pNode, but it might still be outside
2004 // our allowable balance range. In that case a simple fix_height_locked()
2006 int nodeBalance = hLRR - hR;
2007 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2008 // we need another rotation at pNode
2009 m_stat.onRotateAfterRLRotation();
2013 // pNode might also be damaged by being an unnecessary routing node
2014 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2015 // repair involves splicing out pNode and maybe more rotations
2016 m_stat.onRemoveAfterRLRotation();
2020 // we've already fixed the height at pLRight, do we need a rotation here?
2021 int balanceLR = hLeft - hNode;
2022 if ( balanceLR < -1 || balanceLR > 1 ) {
2023 m_stat.onRotateAfterRLRotation();
2027 // try to fix the parent height while we've still got the lock
2028 return fix_height_locked( pParent );
// Right-left double rotation — mirror of rotate_right_over_left_locked(): pRLeft
// (pNode->right->left) is promoted to subtree root, with pNode as its left child and
// pRight as its right child. Used when pNode is right-heavy but pRight is left-heavy.
// Caller holds locks on pParent, pNode, pRight, and pRLeft. Both pNode and pRight
// change shape, so both are bracketed by begin_change()/end_change().
// Returns the next damaged node to repair.
2031 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
2033 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2034 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2036 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2037 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2038 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2039 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
// Both rewired nodes enter the "shrinking" state before any link changes.
2041 begin_change( pNode, nodeVersion );
2042 begin_change( pRight, rightVersion );
2044 // fix up pNode links, careful about the order!
// pRLeft's children are redistributed: left subtree to pNode, right subtree to pRight.
2045 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2046 if ( pRLL != nullptr )
2047 pRLL->parent( pNode, memory_model::memory_order_release );
2049 pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
2050 if ( pRLR != nullptr )
2051 pRLR->parent( pRight, memory_model::memory_order_release );
// pRLeft adopts pRight (right) and pNode (left) as its children.
2053 pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
2054 pRight->parent( pRLeft, memory_model::memory_order_release );
2056 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
2057 pNode->parent( pRLeft, memory_model::memory_order_release );
// Replace pNode with pRLeft in pParent's correct child slot.
2060 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
2062 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2063 pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
2065 pRLeft->parent( pParent, memory_model::memory_order_release );
// Heights bottom-up: pNode and pRight first, then the new root pRLeft.
2068 int hNode = 1 + std::max( hL, hRLL );
2069 set_height( pNode, hNode, memory_model::memory_order_release );
2070 int hRight = 1 + std::max( hRLR, hRR );
2071 set_height( pRight, hRight, memory_model::memory_order_release );
2072 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
2074 end_change( pNode, nodeVersion );
2075 end_change( pRight, rightVersion );
2076 m_stat.onRotateLeftOverRight();
// Sanity: pRight must not end up damaged — else a single rotation was required.
2078 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage assessment (mirrors the RL variant above).
2080 int nodeBalance = hRLL - hL;
2081 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2082 m_stat.onRotateAfterLRRotation();
2086 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2087 m_stat.onRemoveAfterLRRotation();
2091 int balRL = hRight - hNode;
2092 if ( balRL < -1 || balRL > 1 ) {
2093 m_stat.onRotateAfterLRRotation();
// Try to fix the parent's height while we still hold its lock.
2097 return fix_height_locked( pParent );
2102 }} // namespace cds::container
2104 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H