3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// --- Node lifetime and navigation helpers -----------------------------------
// NOTE(review): this chunk appears lossy (some brace/statement lines are
// missing from this view); code below is kept byte-identical.

// Allocates a new tree node via cxx_allocator, forwarding the key.
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees the node memory only; the node must already be value-less and its
// sync-monitor injection must report free (both checked by assertions).
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 assert( pNode->m_SyncMonitorInjection.check_free());
147 cxx_allocator().Delete( pNode );
// Disposes a detached value pointer (body not visible in this chunk;
// presumably invokes the traits' disposer — TODO confirm against full file).
150 static void free_value( mapped_type pVal )
// Atomically loads the child pointer of \p pNode in direction \p nDir.
155 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order = memory_model::memory_order_relaxed )
157 return pNode->child( nDir ).load( order );
// Atomically loads the parent pointer of \p pNode.
160 static node_type * parent( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
162 return pNode->parent( order );
// Collects nodes (and at most one value) unlinked during a single operation
// and retires them through the RCU GC when the operation completes.
168 node_type * m_pRetiredList; ///< head of retired node list
169 mapped_type m_pRetiredValue; ///< value retired
// Both lists start empty; nodes/values are appended while the tree is mutated.
173 : m_pRetiredList( nullptr )
174 , m_pRetiredValue( nullptr )
// Links an unlinked (and already value-less, see assert) node onto the
// retired list; actual reclamation is deferred to RCU retire below.
182 void dispose( node_type * pNode )
184 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
185 pNode->m_pNextRemoved = m_pRetiredList;
186 m_pRetiredList = pNode;
// Records the single value pointer to retire; at most one value per
// operation (enforced by the assertion).
189 void dispose_value( mapped_type pVal )
191 assert( m_pRetiredValue == nullptr );
192 m_pRetiredValue = pVal;
// Functor passed to gc::retire_ptr to free node memory.
196 struct internal_disposer
198 void operator()( node_type * p ) const
// Retirement must happen outside the RCU read-side critical section.
206 assert( !gc::is_locked() );
208 // TODO: use RCU::batch_retire
// Walk the retired-node list, handing each node to the RCU GC.
211 for ( node_type * p = m_pRetiredList; p; ) {
212 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
213 // Value already disposed
214 gc::template retire_ptr<internal_disposer>( p );
// Retire the recorded value (if any) via the traits-provided disposer.
219 if ( m_pRetiredValue )
220 gc::template retire_ptr<disposer>( m_pRetiredValue );
228 typename node_type::base_class m_Root;
230 item_counter m_ItemCounter;
231 mutable sync_monitor m_Monitor;
236 /// Creates empty map
238 : m_pRoot( static_cast<node_type *>( &m_Root ))
249 The \p key_type should be constructible from a value of type \p K.
251 RCU \p synchronize() can be called. RCU should not be locked.
253 Returns \p true if inserting successful, \p false otherwise.
// Inserts (key -> pVal); succeeds only when a brand-new node is created
// (do_update is called with allow_insert only, so existing keys fail).
255 template <typename K>
256 bool insert( K const& key, mapped_type pVal )
258 return do_update(key, key_comparator(),
259 [pVal]( node_type * pNode ) -> mapped_type
// A freshly inserted node must not yet carry a value.
261 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
265 update_flags::allow_insert
266 ) == update_flags::result_inserted;
269 /// Updates the value for \p key
271 The operation performs inserting or updating the value for \p key with lock-free manner.
272 If \p bInsert is \p false, only updating of existing node is possible.
274 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
275 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
276 constructible from type \p K.
277 Otherwise, the value for \p key will be changed to \p pVal.
279 RCU \p synchronize() method can be called. RCU should not be locked.
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
282 \p second is \p true if new node has been added or \p false if the node with \p key
285 template <typename K>
286 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
288 int result = do_update( key, key_comparator(),
289 [pVal]( node_type * ) -> mapped_type
// allow_update is always set; allow_insert only when bInsert is true.
293 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
295 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
// Legacy alias for update(key, pVal, true) — kept for backward compatibility.
299 template <typename K>
300 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
302 return update( key, pVal, true );
307 /// Delete \p key from the map
309 RCU \p synchronize() method can be called. RCU should not be locked.
311 Return \p true if \p key is found and deleted, \p false otherwise
313 template <typename K>
314 bool erase( K const& key )
// The removal functor returns true: the value is disposed (not extracted).
319 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
323 /// Deletes the item from the map using \p pred predicate for searching
325 The function is an analog of \p erase(K const&)
326 but \p pred is used for key comparing.
327 \p Less functor has the interface like \p std::less.
328 \p Less must imply the same element order as the comparator used for building the map.
330 template <typename K, typename Less>
331 bool erase_with( K const& key, Less pred )
// Same removal path as erase(K const&), with a Less-derived comparator.
336 cds::opt::details::make_comparator_from_less<Less>(),
337 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
341 /// Delete \p key from the map
343 The function searches an item with key \p key, calls \p f functor
344 and deletes the item. If \p key is not found, the functor is not called.
346 The functor \p Func interface:
349 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
353 RCU \p synchronize method can be called. RCU should not be locked.
355 Return \p true if key is found and deleted, \p false otherwise
357 template <typename K, typename Func>
358 bool erase( K const& key, Func f )
// User functor runs on the found item before the value is disposed.
363 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
366 disp.dispose_value(pVal);
372 /// Deletes the item from the map using \p pred predicate for searching
374 The function is an analog of \p erase(K const&, Func)
375 but \p pred is used for key comparing.
376 \p Less functor has the interface like \p std::less.
377 \p Less must imply the same element order as the comparator used for building the map.
379 template <typename K, typename Less, typename Func>
380 bool erase_with( K const& key, Less pred, Func f )
// Same as erase(K const&, Func) but with a Less-derived comparator.
385 cds::opt::details::make_comparator_from_less<Less>(),
386 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
389 disp.dispose_value(pVal);
395 /// Extracts a value with minimal key from the map
397 Returns \p exempt_ptr to the leftmost item.
398 If the tree is empty, returns empty \p exempt_ptr.
400 Note that the function returns only the value for minimal key.
401 To retrieve its key use \p extract_min( Func ) member function.
403 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
404 It means that the function gets leftmost leaf of the tree and tries to unlink it.
405 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
406 So, the function returns the item with minimum key at the moment of tree traversing.
408 RCU \p synchronize method can be called. RCU should NOT be locked.
409 The function does not free the item.
410 The deallocator will be implicitly invoked when the returned object is destroyed or when
411 its \p release() member function is called.
413 exempt_ptr extract_min()
415 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
418 /// Extracts the minimal key and corresponding value
420 Returns \p exempt_ptr to the leftmost item.
421 If the tree is empty, returns empty \p exempt_ptr.
423 \p Func functor is used to store minimal key.
424 \p Func has the following signature:
427 void operator()( key_type const& key );
430 If the tree is empty, \p f is not called.
431 Otherwise, it is called with the minimal key; the pointer to corresponding value is returned
434 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
435 It means that the function gets leftmost leaf of the tree and tries to unlink it.
436 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
437 So, the function returns the item with minimum key at the moment of tree traversing.
439 RCU \p synchronize method can be called. RCU should NOT be locked.
440 The function does not free the item.
441 The deallocator will be implicitly invoked when the returned object is destroyed or when
442 its \p release() member function is called.
444 template <typename Func>
445 exempt_ptr extract_min( Func f )
447 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
450 /// Extracts the minimal key and corresponding value
452 This function is a shortcut for the following call:
455 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
457 \p key_type should be copy-assignable. The copy of minimal key
458 is returned in \p min_key argument.
460 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
461 extract_min_key( key_type& min_key )
463 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
466 /// Extracts a value with maximal key from the tree
468 Returns \p exempt_ptr pointer to the rightmost item.
469 If the set is empty, returns empty \p exempt_ptr.
471 Note that the function returns only the value for maximal key.
472 To retrieve its key use \p extract_max( Func ) member function.
474 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
475 It means that the function gets rightmost leaf of the tree and tries to unlink it.
476 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
477 So, the function returns the item with maximum key at the moment of tree traversing.
479 RCU \p synchronize method can be called. RCU should NOT be locked.
480 The function does not free the item.
481 The deallocator will be implicitly invoked when the returned object is destroyed or when
482 its \p release() is called.
484 exempt_ptr extract_max()
486 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
489 /// Extracts the maximal key and corresponding value
491 Returns \p exempt_ptr pointer to the rightmost item.
492 If the set is empty, returns empty \p exempt_ptr.
494 \p Func functor is used to store maximal key.
495 \p Func has the following signature:
498 void operator()( key_type const& key );
501 If the tree is empty, \p f is not called.
502 Otherwise, it is called with the maximal key; the pointer to corresponding value is returned
505 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
506 It means that the function gets rightmost leaf of the tree and tries to unlink it.
507 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
508 So, the function returns the item with maximum key at the moment of tree traversing.
510 RCU \p synchronize method can be called. RCU should NOT be locked.
511 The function does not free the item.
512 The deallocator will be implicitly invoked when the returned object is destroyed or when
513 its \p release() is called.
515 template <typename Func>
516 exempt_ptr extract_max( Func f )
518 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
521 /// Extracts the maximal key and corresponding value
523 This function is a shortcut for the following call:
526 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
528 \p key_type should be copy-assignable. The copy of maximal key
529 is returned in \p max_key argument.
531 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
532 extract_max_key( key_type& max_key )
534 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
537 /// Extracts an item from the map
539 The function searches an item with key equal to \p key in the tree,
540 unlinks it, and returns \p exempt_ptr pointer to a value found.
541 If \p key is not found the function returns an empty \p exempt_ptr.
543 RCU \p synchronize method can be called. RCU should NOT be locked.
544 The function does not destroy the value found.
545 The disposer will be implicitly invoked when the returned object is destroyed or when
546 its \p release() member function is called.
548 template <typename Q>
549 exempt_ptr extract( Q const& key )
551 return exempt_ptr(do_extract( key ));
555 /// Extracts an item from the map using \p pred for searching
557 The function is an analog of \p extract(Q const&)
558 but \p pred is used for key compare.
559 \p Less has the interface like \p std::less.
560 \p pred must imply the same element order as the comparator used for building the tree.
562 template <typename Q, typename Less>
563 exempt_ptr extract_with( Q const& key, Less pred )
565 return exempt_ptr(do_extract_with( key, pred ));
568 /// Find the key \p key
570 The function searches the item with key equal to \p key and calls the functor \p f for item found.
571 The interface of \p Func functor is:
574 void operator()( key_type const& key, mapped_type& item );
577 where \p item is the item found.
578 The functor is called under node-level lock.
580 The function applies RCU lock internally.
582 The function returns \p true if \p key is found, \p false otherwise.
584 template <typename K, typename Func>
585 bool find( K const& key, Func f )
587 return do_find( key, key_comparator(),
588 [&f]( node_type * pNode ) -> bool {
589 assert( pNode != nullptr );
// relaxed load is sufficient here: the node is locked by try_find before
// the functor is invoked (see "called under node-level lock" above).
590 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
592 f( pNode->m_key, *pVal );
600 /// Finds the key \p key using \p pred predicate for searching
602 The function is an analog of \p find(K const&, Func)
603 but \p pred is used for key comparing.
604 \p Less functor has the interface like \p std::less.
605 \p Less must imply the same element order as the comparator used for building the map.
607 template <typename K, typename Less, typename Func>
608 bool find_with( K const& key, Less pred, Func f )
611 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
612 [&f]( node_type * pNode ) -> bool {
613 assert( pNode != nullptr );
614 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
616 f( pNode->m_key, *pVal );
624 /// Find the key \p key
626 The function searches the item with key equal to \p key
627 and returns \p true if it is found, and \p false otherwise.
629 The function applies RCU lock internally.
631 template <typename K>
632 bool find( K const& key )
634 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
637 /// Finds the key \p key using \p pred predicate for searching
639 The function is an analog of \p find(K const&)
640 but \p pred is used for key comparing.
641 \p Less functor has the interface like \p std::less.
642 \p Less must imply the same element order as the comparator used for building the map.
644 template <typename K, typename Less>
645 bool find_with( K const& key, Less pred )
648 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
651 /// Clears the tree (thread safe, not atomic)
653 The function unlinks all items from the tree.
654 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
658 assert( set.empty() );
660 the assertion could be raised.
662 For each node the \ref disposer will be called after unlinking.
664 RCU \p synchronize method can be called. RCU should not be locked.
// Repeatedly extract the leftmost item until the tree is drained.
668 while ( extract_min() );
671 /// Clears the tree (not thread safe)
673 This function is not thread safe and may be called only when no other thread deals with the tree.
674 The function is used in the tree destructor.
678 clear(); // temp solution
682 /// Checks if the map is empty
// The root sentinel's right child is the real tree root; null means empty.
685 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
688 /// Returns item count in the map
690 Only leaf nodes containing user data are counted.
692 The value returned depends on item counter type provided by \p Traits template parameter.
693 If it is \p atomicity::empty_item_counter this function always returns 0.
695 The function is not suitable for checking the tree emptiness, use \p empty()
696 member function for this purpose.
700 return m_ItemCounter;
703 /// Returns const reference to internal statistics
704 stat const& statistics() const
709 /// Returns reference to \p sync_monitor object
710 sync_monitor& monitor()
715 sync_monitor const& monitor() const
721 /// Checks internal consistency (not atomic, not thread-safe)
723 The debugging function to check internal consistency of the tree.
725 bool check_consistency() const
// Delegates to the functor overload with a no-op violation handler.
727 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
730 /// Checks internal consistency (not atomic, not thread-safe)
732 The debugging function to check internal consistency of the tree.
733 The functor \p Func is called if a violation of internal tree structure
737 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
741 - \p nLevel - the level where the violation is found
742 - \p hLeft - the height of left subtree
743 - \p hRight - the height of right subtree
745 The functor is called for each violation found.
747 template <typename Func>
748 bool check_consistency( Func f ) const
// Start from the real root (right child of the sentinel).
750 node_type * pChild = child( m_pRoot, right_child );
753 do_check_consistency( pChild, 1, f, nErrors );
// Recursive checker: verifies BST key ordering and the AVL balance
// invariant (subtree height difference <= 1), reporting via \p f.
761 template <typename Func>
762 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
766 node_type * pLeft = child( pNode, left_child );
767 node_type * pRight = child( pNode, right_child );
// Key-order violations: left child must compare <= parent, parent <= right.
768 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
770 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
773 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
774 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
// AVL balance check, computed height-difference both ways on size_t.
776 if ( hLeft >= hRight ) {
777 if ( hLeft - hRight > 1 ) {
778 f( nLevel, hLeft, hRight );
784 if ( hRight - hLeft > 1 ) {
785 f( nLevel, hLeft, hRight );
// Retry loop wrapper over try_find starting at the root sentinel.
794 template <typename Q, typename Compare, typename Func>
795 bool do_find( Q& key, Compare cmp, Func f ) const
800 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
802 assert( result != find_result::retry );
803 return result == find_result::found;
// Insert/update driver: checks RCU deadlock policy, then delegates to
// try_update_root; removed nodes are collected by the rcu_disposer.
806 template <typename K, typename Compare, typename Func>
807 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
809 check_deadlock_policy::check();
811 rcu_disposer removed_list;
814 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
// Removal driver; the functor decides the value's fate (see comment below).
818 template <typename K, typename Compare, typename Func>
819 bool do_remove( K const& key, Compare cmp, Func func )
821 // Func must return true if the value was disposed
822 // or false if the value was extracted
824 check_deadlock_policy::check();
826 rcu_disposer removed_list;
829 return try_remove_root( key, cmp, func, removed_list );
// Extracts leftmost value; returns false from the functor so the value is
// handed to the caller instead of being disposed.
833 template <typename Func>
834 mapped_type do_extract_min( Func f )
836 mapped_type pExtracted = nullptr;
839 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Mirror of do_extract_min for the rightmost (maximal-key) value.
844 template <typename Func>
845 mapped_type do_extract_max( Func f )
847 mapped_type pExtracted = nullptr;
850 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Common min/max extraction loop: retries from the root until the
// leftmost/rightmost (per nDir) valued node is unlinked.
855 template <typename Func>
856 void do_extract_minmax( int nDir, Func func )
858 check_deadlock_policy::check();
860 rcu_disposer removed_list;
867 // get right child of root
868 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
870 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// If the child is being shrunk by a rotation, wait and retry.
871 if ( nChildVersion & node_type::shrinking ) {
872 m_stat.onRemoveRootWaitShrinking();
873 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
874 result = update_flags::retry;
// Re-read validates that the root's child did not change under us.
876 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
877 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
880 result = update_flags::retry;
885 if ( result == update_flags::retry )
886 m_stat.onRemoveRetry();
// Keyed extraction: functor returns false => value is extracted, not disposed.
893 template <typename Q>
894 mapped_type do_extract( Q const& key )
896 mapped_type pExtracted = nullptr;
900 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Same as do_extract but with a Less-derived comparator.
905 template <typename Q, typename Less>
906 mapped_type do_extract_with( Q const& key, Less pred )
909 mapped_type pExtracted = nullptr;
912 cds::opt::details::make_comparator_from_less<Less>(),
913 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Atomic accessors for the cached AVL height of a node.
921 static int height( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
924 return pNode->m_nHeight.load( order );
926 static void set_height( node_type * pNode, int h, atomics::memory_order order = memory_model::memory_order_relaxed )
929 pNode->m_nHeight.store( h, order );
// Null-safe height: a missing subtree has height 0.
931 static int height_null( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
933 return pNode ? height( pNode, order ) : 0;
// Fixed capacity of the explicit traversal stacks used by the iterative
// try_find/try_update implementations below.
936 static CDS_CONSTEXPR int const c_stackSize = 64;
// Iterative hand-over-hand optimistic search (Bronson et al. algorithm):
// descends using per-node version numbers instead of locks, re-validating
// the parent's version after each child read; on any version mismatch the
// traversal unwinds one level (or returns retry at the root).
938 template <typename Q, typename Compare, typename Func>
939 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
// Search runs entirely inside an RCU read-side critical section.
941 assert( gc::is_locked() );
947 version_type nVersion;
// Explicit stack replaces recursion; capacity bounded by c_stackSize.
951 stack_record stack[c_stackSize];
953 stack[0].pNode = pNode;
954 stack[0].nVersion = nVersion;
955 stack[0].nDir = nDir;
958 pNode = stack[pos].pNode;
959 nVersion = stack[pos].nVersion;
960 nDir = stack[pos].nDir;
963 node_type * pChild = child( pNode, nDir );
// Null child: only valid if the parent's version is still what we read.
965 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
967 m_stat.onFindRetry();
970 m_stat.onFindFailed();
971 return find_result::not_found;
974 int nCmp = cmp( key, pChild->m_key );
// Exact match on a valued node: confirm under node lock before reporting.
976 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
978 node_scoped_lock l( m_Monitor, *pChild );
// Re-check the link and the valued flag while holding the lock.
979 if ( child(pNode, nDir) == pChild ) {
980 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
982 m_stat.onFindSuccess();
983 return find_result::found;
988 m_stat.onFindRetry();
// Matching node exists but carries no value => key is absent.
992 m_stat.onFindFailed();
993 return find_result::not_found;
996 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child is mid-rotation ("shrinking"): wait for it to finish, then retry.
997 if ( nChildVersion & node_type::shrinking ) {
998 m_stat.onFindWaitShrinking();
999 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1001 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1003 m_stat.onFindRetry();
// Child still linked and not unlinked: descend one level.
1007 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
1009 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1011 m_stat.onFindRetry();
1016 assert(pos < c_stackSize);
1017 stack[pos].pNode = pChild;
1018 stack[pos].nVersion = nChildVersion;
1019 stack[pos].nDir = nCmp;
1020 break; // child iteration
1023 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1025 m_stat.onFindRetry();
1029 m_stat.onFindRetry();
// Exhausted the stack without a stable answer: caller must retry from root.
1032 return find_result::retry;
// Recursive variant of the optimistic search above; same version-validation
// protocol, but descends by self-recursion instead of an explicit stack.
1036 template <typename Q, typename Compare, typename Func>
1037 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
1039 assert( gc::is_locked() );
1043 node_type * pChild = child( pNode, nDir );
// Null child is conclusive only if the parent's version is unchanged.
1045 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1046 return find_result::retry;
1048 m_stat.onFindFailed();
1049 return find_result::not_found;
1052 int nCmp = cmp( key, pChild->m_key );
// Exact match: confirm the valued flag under the node-level lock.
1054 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
1056 node_scoped_lock l( m_Monitor, *pChild );
1057 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
1058 if ( f( pChild ) ) {
1059 m_stat.onFindSuccess();
1060 return find_result::found;
// Matching node without a value => key absent.
1065 m_stat.onFindFailed();
1066 return find_result::not_found;
1069 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Wait out a concurrent rotation before re-validating the parent.
1070 if ( nChildVersion & node_type::shrinking ) {
1071 m_stat.onFindWaitShrinking();
1072 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1074 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1075 return find_result::retry;
1077 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
1079 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1080 return find_result::retry;
// Recurse into the child; only a retry from below forces re-validation here.
1082 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
1083 if ( found != find_result::retry )
1087 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1088 return find_result::retry;
1090 m_stat.onFindRetry();
// Top-level insert/update loop: handles the empty-tree case (inserting the
// first node under the root sentinel's lock) and otherwise delegates to
// try_update on the root's right child, retrying on version conflicts.
1095 template <typename K, typename Compare, typename Func>
1096 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1098 assert( gc::is_locked() );
1103 // get right child of root
1104 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1106 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child mid-rotation: wait, then go around the retry loop.
1107 if ( nChildVersion & node_type::shrinking ) {
1108 m_stat.onUpdateRootWaitShrinking();
1109 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1110 result = update_flags::retry;
// Re-read confirms the root child is unchanged before descending.
1112 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1113 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1115 result = update_flags::retry;
1118 // the tree is empty
1119 if ( nFlags & update_flags::allow_insert ) {
1120 // insert into tree as right child of the root
// Sentinel lock serializes first-node insertion.
1122 node_scoped_lock l( m_Monitor, *m_pRoot );
// Lost the race: someone inserted the first node meanwhile.
1123 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1124 result = update_flags::retry;
1128 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1129 mapped_type pVal = funcUpdate( pNew );
1130 assert( pVal != nullptr );
// Release store publishes the value before the node is linked in.
1131 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1133 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
1134 set_height( m_pRoot, 2 );
1138 m_stat.onInsertSuccess();
1139 return update_flags::result_inserted;
// Empty tree and insertion not allowed => nothing to update.
1142 return update_flags::failed;
1145 if ( result != update_flags::retry )
// Top-level removal loop: mirrors try_update_root, delegating to try_remove
// on the root's right child and retrying on version conflicts.
1150 template <typename K, typename Compare, typename Func>
1151 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1153 assert( gc::is_locked() );
1158 // get right child of root
1159 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1161 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child mid-rotation: wait for the shrink to finish, then retry.
1162 if ( nChildVersion & node_type::shrinking ) {
1163 m_stat.onRemoveRootWaitShrinking();
1164 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1165 result = update_flags::retry;
// Re-read validates the root child before descending.
1167 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1168 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1171 result = update_flags::retry;
1176 if ( result == update_flags::retry )
1177 m_stat.onRemoveRetry();
1179 return result == update_flags::result_removed;
// Iterative insert/update descent using the same version-validation protocol
// as try_find: walks down with an explicit stack, updating in place when the
// key matches (try_update_node) or inserting a leaf (try_insert_node),
// unwinding a level whenever a version check fails.
1183 template <typename K, typename Compare, typename Func>
1184 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1186 assert( gc::is_locked() );
1187 assert( nVersion != node_type::unlinked );
1192 version_type nVersion;
// Explicit stack replaces recursion; capacity bounded by c_stackSize.
1195 stack_record stack[c_stackSize];
1197 stack[0].pNode = pNode;
1198 stack[0].nVersion = nVersion;
1200 while ( pos >= 0 ) {
1201 pNode = stack[pos].pNode;
1202 nVersion = stack[pos].nVersion;
1204 int nCmp = cmp( key, pNode->m_key );
// Key matches this node: try the in-place update.
1206 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1207 if ( result != update_flags::retry )
1210 m_stat.onUpdateRetry();
1215 node_type * pChild = child( pNode, nCmp );
// Child read is only meaningful if the node's version is unchanged.
1216 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1218 m_stat.onUpdateRetry();
// Reached an empty slot: insert here if insertion is permitted.
1222 if ( pChild == nullptr ) {
1224 if ( nFlags & update_flags::allow_insert ) {
1225 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1226 if ( result != update_flags::retry )
1229 m_stat.onUpdateRetry();
// Key absent and insertion disallowed.
1233 return update_flags::failed;
1237 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child mid-rotation: wait for the shrink to complete before descending.
1238 if ( nChildVersion & node_type::shrinking ) {
1239 m_stat.onUpdateWaitShrinking();
1240 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1243 else if ( pChild == child( pNode, nCmp )) {
1244 // this second read is important, because it is protected by nChildVersion
1246 // validate the read that our caller took to get to node
1247 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1249 m_stat.onUpdateRetry();
1252 // At this point we know that the traversal our parent took to get to node is still valid.
1253 // The recursive implementation will validate the traversal from node to
1254 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1255 // This means that we are no longer vulnerable to node shrinks, and we don't need
1256 // to validate node version any more.
1258 assert( pos < c_stackSize );
1259 stack[pos].pNode = pChild;
1260 stack[pos].nVersion = nChildVersion;
1261 assert( nChildVersion != node_type::unlinked );
1262 break; // child iteration
1264 m_stat.onUpdateRetry();
// Fully unwound without a stable result: caller retries from the root.
1268 return update_flags::retry;
// Recursive implementation of update/insert (same contract as the iterative
// form): searches for @p key below @p pNode and inserts or updates according
// to @p nFlags, validating @p nVersion at every step.
1272 template <typename K, typename Compare, typename Func>
1273 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1275 assert( gc::is_locked() );
1276 assert( nVersion != node_type::unlinked );
1278 int nCmp = cmp( key, pNode->m_key );
// exact match - update the value of this node
1280 return try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1284 node_type * pChild = child( pNode, nCmp );
// pNode was modified after the child link was read - the caller must retry
1285 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1286 return update_flags::retry;
1288 if ( pChild == nullptr ) {
// no child in the search direction: the key is absent - insert if allowed
1290 if ( nFlags & update_flags::allow_insert )
1291 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1293 result = update_flags::failed;
1297 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// the child is being rotated - wait for the shrink to finish, then retry
1298 if ( nChildVersion & node_type::shrinking ) {
1299 m_stat.onUpdateWaitShrinking();
1300 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1302 result = update_flags::retry;
1304 else if ( pChild == child( pNode, nCmp )) {
1305 // this second read is important, because it is protected by nChildVersion
1307 // validate the read that our caller took to get to node
1308 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1309 return update_flags::retry;
1311 // At this point we know that the traversal our parent took to get to node is still valid.
1312 // The recursive implementation will validate the traversal from node to
1313 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1314 // This means that we are no longer vulnerable to node shrinks, and we don't need
1315 // to validate node version any more.
1316 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1319 result = update_flags::retry;
// on retry: if pNode itself is still unchanged, loop locally instead of
// propagating the retry to the caller
1322 if ( result == update_flags::retry ) {
1323 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1324 return update_flags::retry;
1325 m_stat.onUpdateRetry();
// Iterative (explicit-stack) removal: searches for @p key below @p pNode
// (whose parent is @p pParent) and removes the matching node via
// try_remove_node. @p func is applied to the removed value; @p disp collects
// retired nodes/values for deferred RCU reclamation.
// Returns an update_flags value (result_removed, failed, retry).
1333 template <typename K, typename Compare, typename Func>
1334 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1336 assert( gc::is_locked() );
1337 assert( nVersion != node_type::unlinked );
// each stack slot keeps one (parent, node, validated version) step of the descent
1341 node_type * pParent;
1343 version_type nVersion;
1346 stack_record stack[c_stackSize];
1348 stack[0].pParent = pParent;
1349 stack[0].pNode = pNode;
1350 stack[0].nVersion = nVersion;
1352 while ( pos >= 0 ) {
1353 pParent = stack[pos].pParent;
1354 pNode = stack[pos].pNode;
1355 nVersion = stack[pos].nVersion;
1357 int nCmp = cmp( key, pNode->m_key );
// key found - remove the node/value under the appropriate locks
1359 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1360 if ( result != update_flags::retry )
1363 m_stat.onRemoveRetry();
1368 node_type * pChild = child( pNode, nCmp );
// pNode was modified after we read the child link - retry this step
1369 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1371 m_stat.onRemoveRetry();
// no child in the search direction: the key is not in the tree
1375 if ( pChild == nullptr )
1376 return update_flags::failed;
1379 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// the child is in the middle of a rotation - wait, then re-examine
1380 if ( nChildVersion & node_type::shrinking ) {
1381 m_stat.onRemoveWaitShrinking();
1382 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1385 else if ( pChild == child( pNode, nCmp )) {
1386 // this second read is important, because it is protected by nChildVersion
1388 // validate the read that our caller took to get to node
1389 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1391 m_stat.onRemoveRetry();
1395 // At this point we know that the traversal our parent took to get to node is still valid.
1396 // The recursive implementation will validate the traversal from node to
1397 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1398 // This means that we are no longer vulnerable to node shrinks, and we don't need
1399 // to validate node version any more.
// push (pNode, pChild) and continue the descent
1401 assert( pos < c_stackSize );
1402 stack[pos].pParent = pNode;
1403 stack[pos].pNode = pChild;
1404 stack[pos].nVersion = nChildVersion;
1405 break; // child iteration
1407 m_stat.onRemoveRetry();
1410 return update_flags::retry;
// Recursive removal (same contract as the iterative form): searches for
// @p key below @p pNode and removes it via try_remove_node, validating
// @p nVersion at every step.
1414 template <typename K, typename Compare, typename Func>
1415 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1417 assert( gc::is_locked() );
1418 assert( nVersion != node_type::unlinked );
1420 int nCmp = cmp( key, pNode->m_key );
// exact match - remove this node
1422 return try_remove_node( pParent, pNode, nVersion, func, disp );
1427 node_type * pChild = child( pNode, nCmp );
// pNode was modified after the child link was read - the caller must retry
1428 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1429 return update_flags::retry;
// no child in the search direction: the key is not in the tree
1431 if ( pChild == nullptr )
1432 return update_flags::failed;
1435 result = update_flags::retry;
1436 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// the child is being rotated - wait for the shrink to finish, then retry
1437 if ( nChildVersion & node_type::shrinking ) {
1438 m_stat.onRemoveWaitShrinking();
1439 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1442 else if ( pChild == child( pNode, nCmp )) {
1443 // this second read is important, because it is protected by nChildVersion
1445 // validate the read that our caller took to get to node
1446 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1447 return update_flags::retry;
1449 // At this point we know that the traversal our parent took to get to node is still valid.
1450 // The recursive implementation will validate the traversal from node to
1451 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1452 // This means that we are no longer vulnerable to node shrinks, and we don't need
1453 // to validate node version any more.
1454 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1457 result = update_flags::retry;
// on retry: loop locally while pNode itself is still unchanged
1460 if ( result == update_flags::retry ) {
1461 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1462 return update_flags::retry;
1463 m_stat.onRemoveRetry();
// Iterative extraction of an extreme element: follows the @p nDir child
// chain (leftmost node for the minimum, rightmost for the maximum) down to
// the last node and removes it via try_remove_node; @p func receives the
// extracted value. Returns an update_flags value.
1471 template <typename Func>
1472 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1474 assert( gc::is_locked() );
1475 assert( nVersion != node_type::unlinked );
// each stack slot keeps one (parent, node, validated version) step
1479 node_type * pParent;
1481 version_type nVersion;
1484 stack_record stack[c_stackSize];
1486 stack[0].pParent = pParent;
1487 stack[0].pNode = pNode;
1488 stack[0].nVersion = nVersion;
1490 while ( pos >= 0 ) {
1491 pParent = stack[pos].pParent;
1492 pNode = stack[pos].pNode;
1493 nVersion = stack[pos].nVersion;
1496 node_type * pChild = child( pNode, nDir );
// pNode changed after the child link was read - retry this step
1497 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1499 m_stat.onRemoveRetry();
// no further child in the chosen direction: pNode is the extreme node
1503 if ( pChild == nullptr ) {
1505 assert(pNode->is_valued( memory_model::memory_order_relaxed ));
1506 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1507 if ( result != update_flags::retry )
1510 m_stat.onRemoveRetry();
1514 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// the child is in the middle of a rotation - wait, then re-examine
1515 if ( nChildVersion & node_type::shrinking ) {
1516 m_stat.onRemoveWaitShrinking();
1517 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1520 else if ( pChild == child( pNode, nDir )) {
1521 // this second read is important, because it is protected by nChildVersion
1523 // validate the read that our caller took to get to node
1524 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1526 m_stat.onRemoveRetry();
1530 // At this point we know that the traversal our parent took to get to node is still valid.
1531 // The recursive implementation will validate the traversal from node to
1532 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1533 // This means that we are no longer vulnerable to node shrinks, and we don't need
1534 // to validate node version any more.
// push (pNode, pChild) and continue the descent
1536 assert( pos < c_stackSize );
1537 stack[pos].pParent = pNode;
1538 stack[pos].pNode = pChild;
1539 stack[pos].nVersion = nChildVersion;
1540 break; // child iteration
1542 m_stat.onRemoveRetry();
1545 return update_flags::retry;
// Recursive form of try_extract_minmax (same contract as the iterative
// variant): follows the @p nDir child chain and removes the terminal node.
1549 template <typename Func>
1550 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1552 assert( gc::is_locked() );
1553 assert( nVersion != node_type::unlinked );
1557 node_type * pChild = child( pNode, nDir );
// pNode changed after the child link was read - caller must retry
1558 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1559 return update_flags::retry;
// reached the extreme node - remove it
1561 if ( pChild == nullptr ) {
1563 assert(pNode->is_valued( memory_model::memory_order_relaxed ));
1564 return try_remove_node( pParent, pNode, nVersion, func, disp );
1567 //result = update_flags::retry;
1568 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// the child is being rotated - wait for the shrink to complete, then retry
1569 if ( nChildVersion & node_type::shrinking ) {
1570 m_stat.onRemoveWaitShrinking();
1571 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1573 result = update_flags::retry;
1575 else if ( pChild == child( pNode, nDir )) {
1576 // this second read is important, because it is protected by nChildVersion
1578 // validate the read that our caller took to get to node
1579 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1580 return update_flags::retry;
1582 // At this point we know that the traversal our parent took to get to node is still valid.
1583 // The recursive implementation will validate the traversal from node to
1584 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1585 // This means that we are no longer vulnerable to node shrinks, and we don't need
1586 // to validate node version any more.
1587 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1590 result = update_flags::retry;
// on retry: loop locally while pNode itself is still unchanged
1593 if ( result == update_flags::retry ) {
1594 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1595 return update_flags::retry;
1596 m_stat.onRemoveRetry();
// Creates a new node for @p key as the @p nDir child of @p pNode, letting
// @p funcUpdate produce its value. With c_bRelaxedInsert the node is
// allocated and filled speculatively, before taking the node lock; the
// speculation is rolled back if the insertion point changed meanwhile.
// Returns result_inserted on success, or retry if @p pNode changed.
1604 template <typename K, typename Func>
1605 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// fills a freshly allocated node with the user-provided value
1609 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1610 mapped_type pVal = funcUpdate( pNew );
1611 assert( pVal != nullptr );
1612 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// relaxed insert: allocate outside the lock, re-checking the insertion
// point first to keep the speculation cheap
1615 if ( c_bRelaxedInsert ) {
1616 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1617 || child( pNode, nDir ) != nullptr )
1619 m_stat.onInsertRetry();
1620 return update_flags::retry;
1623 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1626 node_type * pDamaged;
1628 assert( pNode != nullptr );
1629 node_scoped_lock l( m_Monitor, *pNode );
// re-validate under the lock: the parent must be unchanged and the slot
// still empty, otherwise undo the speculative allocation and retry
1631 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1632 || child( pNode, nDir ) != nullptr )
1634 if ( c_bRelaxedInsert ) {
// detach the value from the speculative node before discarding it
1635 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1636 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1639 m_stat.onRelaxedInsertFailed();
1642 m_stat.onInsertRetry();
1643 return update_flags::retry;
// non-relaxed insert: allocate only now, while the lock is held
1646 if ( !c_bRelaxedInsert )
1647 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// link the new node and record the topmost height-damaged node
1649 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1650 pDamaged = fix_height_locked( pNode );
1654 m_stat.onInsertSuccess();
// repair heights/balance after releasing the insertion lock if needed
1657 fix_height_and_rebalance( pDamaged, disp );
1658 m_stat.onInsertRebalanceRequired();
1661 return update_flags::result_inserted;
// Updates the value of the existing node @p pNode under its lock.
// Fails (update_flags::failed) when the node already has a value and
// @p nFlags does not contain allow_update; returns retry if the node version
// changed or the node was unlinked concurrently.
// The replaced value, if any, is handed to @p disp for deferred disposal.
1664 template <typename Func>
1665 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1669 assert( pNode != nullptr );
1671 node_scoped_lock l( m_Monitor, *pNode );
// the version changed between the optimistic read and taking the lock
1673 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1674 return update_flags::retry;
1676 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1677 m_stat.onUpdateUnlinked();
1678 return update_flags::retry;
// the key exists and updating is not allowed - report failure
1681 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
1682 m_stat.onInsertFailed();
1683 return update_flags::failed;
// a null old value means pNode was a routing node: this counts as insertion
1686 pOld = pNode->value( memory_model::memory_order_relaxed );
1687 bInserted = pOld == nullptr;
1688 mapped_type pVal = funcUpdate( pNode );
1692 assert( pVal != nullptr );
1693 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// the replaced value is reclaimed after the RCU grace period
1698 disp.dispose_value(pOld);
1699 m_stat.onDisposeValue();
1703 m_stat.onInsertSuccess();
1704 return update_flags::result_inserted;
1707 m_stat.onUpdateSuccess();
1708 return update_flags::result_updated;
// Physically or logically removes @p pNode (a child of @p pParent):
// - if the node has at most one child, it is spliced out of the tree
//   (try_unlink_locked) under the locks of both parent and node;
// - an internal node with two children is removed only logically: its value
//   pointer is reset to nullptr and the node remains as a routing node.
// @p func receives the old value (and may dispose it).
// Returns an update_flags value (result_removed, failed, retry).
1711 template <typename Func>
1712 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1714 assert( pParent != nullptr );
1715 assert( pNode != nullptr );
// already logically removed - nothing to do
1717 if ( !pNode->is_valued( memory_model::memory_order_relaxed ) )
1718 return update_flags::failed;
1720 if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
1721 // pNode can be replaced with its child
1723 node_type * pDamaged;
// lock order: parent first, then node
1726 node_scoped_lock lp( m_Monitor, *pParent );
1727 if ( pParent->is_unlinked( memory_model::memory_order_relaxed ) || parent( pNode ) != pParent )
1728 return update_flags::retry;
1731 node_scoped_lock ln( m_Monitor, *pNode );
1732 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1733 return update_flags::retry;
1735 pOld = pNode->value( memory_model::memory_order_relaxed );
1737 return update_flags::failed;
// splice the node out; on failure the structure changed - retry
1739 if ( !try_unlink_locked( pParent, pNode, disp ))
1740 return update_flags::retry;
// record the next height-damaged ancestor while the lock is still held
1742 pDamaged = fix_height_locked( pParent );
1746 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1747 m_stat.onDisposeValue();
1749 m_stat.onExtractValue();
1752 fix_height_and_rebalance( pDamaged, disp );
1753 m_stat.onRemoveRebalanceRequired();
1757 // pNode is an internal with two children
1761 node_scoped_lock ln( m_Monitor, *pNode );
1762 pOld = pNode->value( memory_model::memory_order_relaxed );
1763 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1764 return update_flags::retry;
1766 return update_flags::failed;
// logical removal only: the node stays in the tree as a routing node
1768 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1772 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1773 m_stat.onDisposeValue();
1775 m_stat.onExtractValue();
1777 return update_flags::result_removed;
// Splices @p pNode (which must have at most one child) out of the tree.
// Both @p pParent and @p pNode must be locked by the caller.
// Returns false when splicing is no longer possible (the parent link changed
// or the node now has two children); on success the node is marked unlinked
// and handed to @p disp for deferred disposal.
1780 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1782 // pParent and pNode must be locked
1783 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1785 node_type * pParentLeft = child( pParent, left_child );
1786 node_type * pParentRight = child( pParent, right_child );
1787 if ( pNode != pParentLeft && pNode != pParentRight ) {
1788 // node is no longer a child of parent
1792 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1793 assert( pParent == parent( pNode ));
1795 node_type * pLeft = child( pNode, left_child );
1796 node_type * pRight = child( pNode, right_child );
1797 if ( pLeft != nullptr && pRight != nullptr ) {
1798 // splicing is no longer possible
// the (single) child, if any, replaces pNode under its parent
1801 node_type * pSplice = pLeft ? pLeft : pRight;
1803 if ( pParentLeft == pNode )
1804 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1806 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1809 pSplice->parent( pParent, memory_model::memory_order_relaxed );
1811 // Mark the node as unlinked
1812 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1814 // The value will be disposed by calling function
1815 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// the node itself is reclaimed after the RCU grace period
1817 disp.dispose( pNode );
1818 m_stat.onDisposeNode();
1825 private: // rotations
1827 int estimate_node_condition( node_type * pNode )
1829 node_type * pLeft = child( pNode, left_child );
1830 node_type * pRight = child( pNode, right_child );
1832 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1833 return unlink_required;
1835 int h = height( pNode );
1836 int hL = height_null( pLeft );
1837 int hR = height_null( pRight );
1839 int hNew = 1 + std::max( hL, hR );
1840 int nBalance = hL - hR;
1842 if ( nBalance < -1 || nBalance > 1 )
1843 return rebalance_required;
1845 return h != hNew ? hNew : nothing_required;
1848 node_type * fix_height( node_type * pNode )
1850 assert( pNode != nullptr );
1851 node_scoped_lock l( m_Monitor, *pNode );
1852 return fix_height_locked( pNode );
1855 node_type * fix_height_locked( node_type * pNode )
1857 // pNode must be locked!!!
1858 int h = estimate_node_condition( pNode );
1860 case rebalance_required:
1861 case unlink_required:
1863 case nothing_required:
1866 set_height( pNode, h );
1867 return parent( pNode );
// Walks up from @p pNode repairing cached heights and performing rotations
// until no more damage is found or a node without a parent is reached.
// Locks are taken parent-first, node-second at each repair step.
1871 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
// stop when pNode has no parent (the pseudo-root)
1873 while ( pNode && parent( pNode )) {
1874 int nCond = estimate_node_condition( pNode );
1875 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// only the height is stale - fix it and move to the next damaged node
1878 if ( nCond != unlink_required && nCond != rebalance_required )
1879 pNode = fix_height( pNode );
1881 node_type * pParent = parent( pNode );
1882 assert( pParent != nullptr );
1884 node_scoped_lock lp( m_Monitor, *pParent );
// re-validate the parent link under the lock before rebalancing
1885 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode ) == pParent ) {
1886 node_scoped_lock ln( m_Monitor, *pNode );
1887 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs @p pNode while both @p pParent and @p pNode are locked:
// splices out an empty routing node, rotates when the balance factor
// exceeds 1, or just refreshes the cached height.
1894 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1896 // pParent and pNode should be locked.
1897 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1898 assert( parent( pNode ) == pParent );
1900 node_type * pLeft = child( pNode, left_child );
1901 node_type * pRight = child( pNode, right_child );
// valueless node with at most one child: splice it out
1903 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1904 if ( try_unlink_locked( pParent, pNode, disp ))
1905 return fix_height_locked( pParent );
1907 // retry needed for pNode
1912 assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
1914 int h = height( pNode );
1915 int hL = height_null( pLeft );
1916 int hR = height_null( pRight );
1917 int hNew = 1 + std::max( hL, hR );
1918 int balance = hL - hR;
// left-heavy -> rotate right; right-heavy -> rotate left
1921 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1922 else if ( balance < -1 )
1923 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1924 else if ( hNew != h ) {
1925 set_height( pNode, hNew );
1927 // pParent is already locked
1928 return fix_height_locked( pParent );
// Rebalances a left-heavy @p pNode by a right rotation, or by a left-right
// double rotation when the left child's right subtree is the taller one.
// @p pParent and @p pNode are already locked; @p pLeft is locked here.
// Returns the node that must be re-examined next (pNode itself on retry).
1934 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1936 assert( parent( pNode ) == pParent );
1937 assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
1939 // pParent and pNode is locked yet
1940 // pNode->pLeft is too large, we will rotate-right.
1941 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1944 assert( pLeft != nullptr );
1945 node_scoped_lock l( m_Monitor, *pLeft );
// the left-child link changed while acquiring the lock - retry at pNode
1946 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1947 return pNode; // retry for pNode
1949 int hL = height( pLeft );
1951 return pNode; // retry
1953 node_type * pLRight = child( pLeft, right_child );
1954 int hLR = height_null( pLRight );
1955 node_type * pLLeft = child( pLeft, left_child );
1956 int hLL = height_null( pLLeft );
// a single right rotation is sufficient
1960 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1963 assert( pLRight != nullptr );
1965 node_scoped_lock lr( m_Monitor, *pLRight );
1966 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1967 return pNode; // retry
// re-read the height under pLRight's lock
1969 hLR = height( pLRight );
1971 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1973 int hLRL = height_null( child( pLRight, left_child ));
1974 int balance = hLL - hLRL;
1975 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1976 // nParent.child.left won't be damaged after a double rotation
1977 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1981 // focus on pLeft, if necessary pNode will be balanced later
1982 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked: rebalances a right-heavy @p pNode by
// a left rotation, or by a right-left double rotation when the right child's
// left subtree is the taller one. @p pRight is locked here.
1987 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1989 assert( parent( pNode ) == pParent );
1990 assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
1992 // pParent and pNode is locked yet
1994 assert( pRight != nullptr );
1995 node_scoped_lock l( m_Monitor, *pRight );
// the right-child link changed while acquiring the lock - retry at pNode
1996 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1997 return pNode; // retry for pNode
1999 int hR = height( pRight );
2000 if ( hL - hR >= -1 )
2001 return pNode; // retry
2003 node_type * pRLeft = child( pRight, left_child );
2004 int hRL = height_null( pRLeft );
2005 node_type * pRRight = child( pRight, right_child );
2006 int hRR = height_null( pRRight );
// a single left rotation is sufficient
2008 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
2011 assert( pRLeft != nullptr );
2012 node_scoped_lock lrl( m_Monitor, *pRLeft );
2013 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
2014 return pNode; // retry
// re-read the height under pRLeft's lock
2016 hRL = height( pRLeft );
2018 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
2020 node_type * pRLRight = child( pRLeft, right_child );
2021 int hRLR = height_null( pRLRight );
2022 int balance = hRR - hRLR;
2023 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
2024 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// focus on pRight; pNode will be balanced later if necessary
2026 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
2030 static void begin_change( node_type * pNode, version_type version )
2032 assert(pNode->version(memory_model::memory_order_acquire) == version );
2033 assert( (version & node_type::shrinking) == 0 );
2034 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
2036 static void end_change( node_type * pNode, version_type version )
2038 // Clear shrinking and unlinked flags and increment version
2039 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation around @p pNode: @p pLeft takes pNode's place under
// @p pParent, pNode becomes pLeft's right child, and pLeft's former right
// subtree (@p pLRight) becomes pNode's left subtree.
// All three nodes are locked by the caller. The change is bracketed by
// begin_change/end_change so readers seeing the "shrinking" flag wait;
// the release fences order the link stores for concurrent readers.
// Returns the next damaged node to repair, or nullptr.
2042 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
2044 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2045 node_type * pParentLeft = child( pParent, left_child );
// mark pNode as shrinking for the duration of the rotation
2047 begin_change( pNode, nodeVersion );
// step 1: pLRight moves from pLeft to pNode
2049 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
2050 if ( pLRight != nullptr )
2051 pLRight->parent( pNode, memory_model::memory_order_relaxed );
2053 atomics::atomic_thread_fence( memory_model::memory_order_release );
// step 2: pNode becomes the right child of pLeft
2055 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
2056 pNode->parent( pLeft, memory_model::memory_order_relaxed );
2058 atomics::atomic_thread_fence( memory_model::memory_order_release );
// step 3: pLeft replaces pNode under pParent
2060 if ( pParentLeft == pNode )
2061 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
2063 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2064 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
2066 pLeft->parent( pParent, memory_model::memory_order_relaxed );
2068 atomics::atomic_thread_fence( memory_model::memory_order_release );
2070 // fix up heights links
2071 int hNode = 1 + std::max( hLR, hR );
2072 set_height( pNode, hNode );
2073 set_height( pLeft, 1 + std::max( hLL, hNode ));
// clear the shrinking flag and bump pNode's version
2075 end_change( pNode, nodeVersion );
2076 m_stat.onRotateRight();
2078 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
2079 // parent.child). pNode is the deepest. Perform as many fixes as we can
2080 // with the locks we've got.
2082 // We've already fixed the height for pNode, but it might still be outside
2083 // our allowable balance range. In that case a simple fix_height_locked()
2085 int nodeBalance = hLR - hR;
2086 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2087 // we need another rotation at pNode
2088 m_stat.onRotateAfterRightRotation();
2092 // we've fixed balance and height damage for pNode, now handle
2093 // extra-routing node damage
2094 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
2095 // we need to remove pNode and then repair
2096 m_stat.onRemoveAfterRightRotation();
2100 // we've already fixed the height at pLeft, do we need a rotation here?
2101 int leftBalance = hLL - hNode;
2102 if ( leftBalance < -1 || leftBalance > 1 ) {
2103 m_stat.onRotateAfterRightRotation();
2107 // pLeft might also have routing node damage (if pLeft.left was null)
2108 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_relaxed) ) {
2109 m_stat.onDamageAfterRightRotation();
2113 // try to fix the parent height while we've still got the lock
2114 return fix_height_locked( pParent );
// Mirror of rotate_right_locked: single left rotation around @p pNode.
// @p pRight takes pNode's place under @p pParent, pNode becomes pRight's
// left child, and pRight's former left subtree (@p pRLeft) becomes pNode's
// right subtree. Same begin_change/end_change bracketing and release-fence
// ordering as the right rotation. Returns the next damaged node, or nullptr.
2117 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
2119 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2120 node_type * pParentLeft = child( pParent, left_child );
// mark pNode as shrinking for the duration of the rotation
2122 begin_change( pNode, nodeVersion );
2124 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
2125 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
2126 if ( pRLeft != nullptr )
2127 pRLeft->parent( pNode, memory_model::memory_order_relaxed );
2129 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pNode becomes the left child of pRight
2131 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
2132 pNode->parent( pRight, memory_model::memory_order_relaxed );
2134 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pRight replaces pNode under pParent
2136 if ( pParentLeft == pNode )
2137 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
2139 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2140 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
2142 pRight->parent( pParent, memory_model::memory_order_relaxed );
2144 atomics::atomic_thread_fence( memory_model::memory_order_release );
2147 int hNode = 1 + std::max( hL, hRL );
2148 set_height( pNode, hNode );
2149 set_height( pRight, 1 + std::max( hNode, hRR ));
// clear the shrinking flag and bump pNode's version
2151 end_change( pNode, nodeVersion );
2152 m_stat.onRotateLeft();
// pNode may still be out of balance after the rotation
2154 int nodeBalance = hRL - hL;
2155 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2156 m_stat.onRotateAfterLeftRotation();
// pNode may have become an unnecessary routing node
2160 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2161 m_stat.onRemoveAfterLeftRotation();
// check whether pRight now needs a rotation
2165 int rightBalance = hRR - hNode;
2166 if ( rightBalance < -1 || rightBalance > 1 ) {
2167 m_stat.onRotateAfterLeftRotation();
// pRight may have routing-node damage as well
2171 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_relaxed) ) {
2172 m_stat.onDamageAfterLeftRotation();
// try to fix the parent height while we've still got the lock
2176 return fix_height_locked( pParent );
// Left-right double rotation: @p pLRight (pLeft's right child) becomes the
// new root of this subtree, with @p pLeft as its left child and @p pNode as
// its right child; pLRight's former subtrees are redistributed (its left one
// to pLeft, its right one to pNode).
// pParent, pNode, pLeft and pLRight are locked by the caller; both pNode and
// pLeft are marked shrinking for the duration of the change, and release
// fences order the link stores for concurrent readers.
// Returns the next damaged node to repair, or nullptr.
2179 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
2181 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2182 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
2184 node_type * pPL = child( pParent, left_child );
2185 node_type * pLRL = child( pLRight, left_child );
2186 node_type * pLRR = child( pLRight, right_child );
2187 int hLRR = height_null( pLRR );
// both pNode and pLeft change shape - mark them shrinking
2189 begin_change( pNode, nodeVersion );
2190 begin_change( pLeft, leftVersion );
2192 // fix up pNode links, careful about the order!
2193 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
2194 if ( pLRR != nullptr )
2195 pLRR->parent( pNode, memory_model::memory_order_relaxed );
2196 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pLRight's left subtree becomes pLeft's right subtree
2198 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
2199 if ( pLRL != nullptr )
2200 pLRL->parent( pLeft, memory_model::memory_order_relaxed );
2201 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pLeft and pNode become pLRight's children
2203 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
2204 pLeft->parent( pLRight, memory_model::memory_order_relaxed );
2205 atomics::atomic_thread_fence( memory_model::memory_order_release );
2207 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
2208 pNode->parent( pLRight, memory_model::memory_order_relaxed );
2209 atomics::atomic_thread_fence( memory_model::memory_order_release );
// finally pLRight replaces pNode under pParent
2212 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
2214 assert( child( pParent, right_child ) == pNode );
2215 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
2217 pLRight->parent( pParent, memory_model::memory_order_relaxed );
2218 atomics::atomic_thread_fence( memory_model::memory_order_release );
// recompute the cached heights bottom-up
2221 int hNode = 1 + std::max( hLRR, hR );
2222 set_height( pNode, hNode );
2223 int hLeft = 1 + std::max( hLL, hLRL );
2224 set_height( pLeft, hLeft );
2225 set_height( pLRight, 1 + std::max( hLeft, hNode ));
// clear the shrinking flags and bump both versions
2227 end_change( pNode, nodeVersion );
2228 end_change( pLeft, leftVersion );
2229 m_stat.onRotateRightOverLeft();
2231 // caller should have performed only a single rotation if pLeft was going
2232 // to end up damaged
2233 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2234 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
2236 // We have damaged pParent, pLR (now parent.child), and pNode (now
2237 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2238 // can with the locks we've got.
2240 // We've already fixed the height for pNode, but it might still be outside
2241 // our allowable balance range. In that case a simple fix_height_locked()
2243 int nodeBalance = hLRR - hR;
2244 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2245 // we need another rotation at pNode
2246 m_stat.onRotateAfterRLRotation();
2250 // pNode might also be damaged by being an unnecessary routing node
2251 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2252 // repair involves splicing out pNode and maybe more rotations
2253 m_stat.onRemoveAfterRLRotation();
2257 // we've already fixed the height at pLRight, do we need a rotation here?
2258 int balanceLR = hLeft - hNode;
2259 if ( balanceLR < -1 || balanceLR > 1 ) {
2260 m_stat.onRotateAfterRLRotation();
2264 // try to fix the parent height while we've still got the lock
2265 return fix_height_locked( pParent );
// Left-over-right double rotation, the mirror of rotate_right_over_left_locked().
// pRLeft (left child of pNode's right child pRight) is promoted to the root of
// this subtree: pNode adopts pRLeft's left subtree (pRLL) and pRight adopts
// pRLeft's right subtree (pRLR).
// @param pParent  parent of the subtree root pNode (caller holds the needed locks)
// @param pNode    current subtree root being demoted
// @param hL       cached height of pNode's left subtree
// @param pRight   pNode's right child
// @param pRLeft   pRight's left child - promoted to subtree root
// @param hRR      cached height of pRight's right subtree
// @param hRLR     cached height of pRLeft's right subtree
// Returns the result of fix_height_locked( pParent ) on the common path;
// NOTE(review): the early-out branches delegate to the rebalancing helpers,
// mirroring the right-over-left case - confirm against those helpers.
2268 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
// Capture versions before mutating; begin_change() below opens the "changing"
// window so concurrent optimistic readers notice the rotation and retry.
2270 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2271 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
// pPL: snapshot of pParent's left child - presumably used to decide which
// parent link currently points at pNode (the right-side case is asserted
// below); verify against the branch structure.
2273 node_type * pPL = child( pParent, left_child );
2274 node_type * pRLL = child( pRLeft, left_child );
2275 node_type * pRLR = child( pRLeft, right_child );
2276 int hRLL = height_null( pRLL );
2278 begin_change( pNode, nodeVersion );
2279 begin_change( pRight, rightVersion );
2281 // fix up pNode links, careful about the order!
2282 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
2283 if ( pRLL != nullptr )
2284 pRLL->parent( pNode, memory_model::memory_order_relaxed );
// Release fence publishes the relaxed child/parent stores above before the
// next group of link updates.
2285 atomics::atomic_thread_fence( memory_model::memory_order_release );
2287 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
2288 if ( pRLR != nullptr )
2289 pRLR->parent( pRight, memory_model::memory_order_relaxed );
2290 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pRLeft becomes the parent of both pRight (as its right child) ...
2292 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
2293 pRight->parent( pRLeft, memory_model::memory_order_relaxed );
2294 atomics::atomic_thread_fence( memory_model::memory_order_release );
// ... and pNode (as its left child).
2296 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
2297 pNode->parent( pRLeft, memory_model::memory_order_relaxed );
2298 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Swing pParent's link from pNode to the new subtree root pRLeft, on
// whichever side pNode occupied.
2301 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
2303 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2304 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
2306 pRLeft->parent( pParent, memory_model::memory_order_relaxed );
2307 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Recompute heights bottom-up: the two demoted nodes first, then pRLeft.
2310 int hNode = 1 + std::max( hL, hRLL );
2311 set_height( pNode, hNode );
2312 int hRight = 1 + std::max( hRLR, hRR );
2313 set_height( pRight, hRight );
2314 set_height( pRLeft, 1 + std::max( hNode, hRight ));
// Close the version windows: optimistic readers may traverse these nodes again.
2316 end_change( pNode, nodeVersion );
2317 end_change( pRight, rightVersion );
2318 m_stat.onRotateLeftOverRight();
// Caller should have performed only a single rotation if pRight was going to
// end up damaged (mirror of the assertion in the right-over-left case).
2320 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// pNode's height is fixed but it may still be outside the balance range:
// another rotation at pNode is needed.
2322 int nodeBalance = hRLL - hL;
2323 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2324 m_stat.onRotateAfterLRRotation();
// pNode may also have become an unnecessary routing node (no value and at
// most one child): repair splices it out.
2328 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2329 m_stat.onRemoveAfterLRRotation();
// Height at pRLeft is already fixed; check whether it needs a rotation too.
2333 int balRL = hRight - hNode;
2334 if ( balRL < -1 || balRL > 1 ) {
2335 m_stat.onRotateAfterLRRotation();
// Finally, repair pParent's height while its lock is still held.
2339 return fix_height_locked( pParent );
2344 }} // namespace cds::container
2346 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H