2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
34 #include <type_traits>
36 #include <cds/intrusive/details/skip_list_base.h>
37 #include <cds/opt/compare.h>
38 #include <cds/urcu/details/check_deadlock.h>
39 #include <cds/details/binary_functor_wrapper.h>
40 #include <cds/urcu/exempt_ptr.h>
41 #include <cds/urcu/raw_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
44 namespace cds { namespace intrusive {
49 template <class RCU, typename Tag>
50 class node< cds::urcu::gc< RCU >, Tag >
53 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
54 typedef Tag tag ; ///< tag
57 // bit 0 - the item is logically deleted
58 // bit 1 - the item is extracted (only for level 0)
59 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
60 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
61 typedef atomic_marked_ptr tower_item_type;
64 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
66 node * m_pDelChain; ///< Deleted node chain (local for a thread)
68 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
69 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
70 atomics::atomic<unsigned> m_nUnlink; ///< How many levels has been unlinked
73 /// Constructs a node of height 1 (a bottom-list node)
76 , m_pDelChain( nullptr )
78 , m_arrNext( nullptr )
82 /// Constructs a node of height \p nHeight
83 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
85 assert( nHeight > 0 );
86 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
87 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
90 m_arrNext = nextTower;
94 atomic_marked_ptr * release_tower()
96 atomic_marked_ptr * pTower = m_arrNext;
102 atomic_marked_ptr * get_tower() const
109 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
110 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
113 /// Access to element of next pointer array
114 atomic_marked_ptr& next( unsigned int nLevel )
116 assert( nLevel < height());
117 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr));
119 # ifdef CDS_THREAD_SANITIZER_ENABLED
120 // TSan false positive: m_arrNext is read-only array
121 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
122 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
123 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
126 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
130 /// Access to element of next pointer array (const version)
131 atomic_marked_ptr const& next( unsigned int nLevel ) const
133 assert( nLevel < height());
134 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
136 # ifdef CDS_THREAD_SANITIZER_ENABLED
137 // TSan false positive: m_arrNext is read-only array
138 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
139 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
140 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
143 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
147 /// Access to element of next pointer array (same as \ref next function)
148 atomic_marked_ptr& operator[]( unsigned int nLevel )
150 return next( nLevel );
153 /// Access to element of next pointer array (same as \ref next function)
154 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
156 return next( nLevel );
159 /// Height of the node
160 unsigned int height() const
165 /// Clears internal links
168 assert( m_arrNext == nullptr );
169 m_pNext.store( marked_ptr(), atomics::memory_order_release );
170 m_pDelChain = nullptr;
173 bool is_cleared() const
175 return m_pNext == atomic_marked_ptr()
176 && m_arrNext == nullptr
180 bool level_unlinked( unsigned nCount = 1 )
182 return m_nUnlink.fetch_add( nCount, std::memory_order_relaxed ) + 1 == height();
185 } // namespace skip_list
189 namespace skip_list { namespace details {
191 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
192 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
195 typedef cds::urcu::gc< RCU > gc;
196 typedef NodeTraits node_traits;
197 typedef BackOff back_off;
198 typedef typename node_traits::node_type node_type;
199 typedef typename node_traits::value_type value_type;
200 static bool const c_isConst = IsConst;
202 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
205 typedef typename node_type::marked_ptr marked_ptr;
206 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
216 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) {
217 // Current node is marked as deleted. So, its next pointer can point to anything
218 // In this case we interrupt our iteration and returns end() iterator.
223 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
224 node_type * pp = p.ptr();
226 // p is marked as deleted. Spin waiting for physical removal
230 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) {
231 // p is marked as deleted. Spin waiting for physical removal
241 public: // for internal use only!!!
242 iterator( node_type& refHead )
248 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
254 node_type * pp = p.ptr();
255 // Logically deleted node is marked from highest level
256 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) {
270 iterator( iterator const& s)
271 : m_pNode( s.m_pNode )
274 value_type * operator ->() const
276 assert( m_pNode != nullptr );
277 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
279 return node_traits::to_value_ptr( m_pNode );
282 value_ref operator *() const
284 assert( m_pNode != nullptr );
285 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
287 return *node_traits::to_value_ptr( m_pNode );
291 iterator& operator ++()
297 iterator& operator = (const iterator& src)
299 m_pNode = src.m_pNode;
303 template <typename Bkoff, bool C>
304 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
306 return m_pNode == i.m_pNode;
308 template <typename Bkoff, bool C>
309 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
311 return !( *this == i );
314 }} // namespace skip_list::details
317 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
318 /** @ingroup cds_intrusive_map
319 @anchor cds_intrusive_SkipListSet_rcu
321 The implementation of well-known probabilistic data structure called skip-list
322 invented by W.Pugh in his papers:
323 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
324 - [1990] W.Pugh A Skip List Cookbook
326 A skip-list is a probabilistic data structure that provides expected logarithmic
327 time search without the need of rebalance. The skip-list is a collection of sorted
328 linked lists. Nodes are ordered by key. Each node is linked into a subset of the lists.
329 Each list has a level, ranging from 0 to 32. The bottom-level list contains
330 all the nodes, and each higher-level list is a sublist of the lower-level lists.
331 Each node is created with a random top level (with a random height), and belongs
332 to all lists up to that level. The probability that a node has the height 1 is 1/2.
333 The probability that a node has the height N is 1/2 ** N (more precisely,
334 the distribution depends on a random generator provided, but our generators
337 The lock-free variant of skip-list is implemented according to book
338 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
339 chapter 14.4 "A Lock-Free Concurrent Skiplist".
341 <b>Template arguments</b>:
342 - \p RCU - one of \ref cds_urcu_gc "RCU type"
343 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
344 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
345 - \p Traits - set traits, default is \p skip_list::traits
346 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
347 instead of \p Traits template argument.
349 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
350 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
354 The class supports a forward iterator (\ref iterator and \ref const_iterator).
355 The iteration is ordered.
357 You may iterate over skip-list set items only under RCU lock.
358 Only in this case the iterator is thread-safe since
359 while RCU is locked any set's item cannot be reclaimed.
361 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
362 (i.e. inserting, erasing and so on) is not possible.
364 @warning The iterator object cannot be passed between threads.
366 Example how to use skip-list set iterators:
368 // First, you should include the header for RCU type you have chosen
369 #include <cds/urcu/general_buffered.h>
370 #include <cds/intrusive/skip_list_rcu.h>
372 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
378 // Traits for your skip-list.
379 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
380 struct my_traits: public cds::intrusive::skip_list::traits
384 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
386 my_skiplist_set theSet;
392 // Apply RCU locking manually
393 typename rcu_type::scoped_lock sl;
395 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
399 // rcu_type::scoped_lock destructor releases RCU lock implicitly
403 The iterator class supports the following minimalistic interface:
410 iterator( iterator const& s);
412 value_type * operator ->() const;
413 value_type& operator *() const;
416 iterator& operator ++();
419 iterator& operator = (const iterator& src);
421 bool operator ==(iterator const& i ) const;
422 bool operator !=(iterator const& i ) const;
425 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
429 You should incorporate skip_list::node into your struct \p T and provide
430 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
431 define a struct based on \p skip_list::traits.
433 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
435 // First, you should include the header for RCU type you have chosen
436 #include <cds/urcu/general_buffered.h>
438 // Include RCU skip-list specialization
439 #include <cds/intrusive/skip_list_rcu.h>
442 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
444 // Data stored in skip list
445 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
454 // my_data compare functor
456 int operator()( const my_data& d1, const my_data& d2 )
458 return d1.strKey.compare( d2.strKey );
461 int operator()( const my_data& d, const std::string& s )
463 return d.strKey.compare(s);
466 int operator()( const std::string& s, const my_data& d )
468 return s.compare( d.strKey );
473 struct my_traits: public cds::intrusive::skip_list::traits
475 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
476 typedef my_data_cmp compare;
479 // Declare skip-list set type
480 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
483 Equivalent option-based code:
485 #include <cds/urcu/general_buffered.h>
486 #include <cds/intrusive/skip_list_rcu.h>
488 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
497 // Declare option-based skip-list set
498 typedef cds::intrusive::SkipListSet< rcu_type
500 , typename cds::intrusive::skip_list::make_traits<
501 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
502 ,cds::intrusive::opt::compare< my_data_cmp >
511 #ifdef CDS_DOXYGEN_INVOKED
512 ,typename Traits = skip_list::traits
517 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
520 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
521 typedef T value_type; ///< type of value stored in the skip-list
522 typedef Traits traits; ///< Traits template parameter
524 typedef typename traits::hook hook; ///< hook type
525 typedef typename hook::node_type node_type; ///< node type
527 # ifdef CDS_DOXYGEN_INVOKED
528 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
530 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
533 typedef typename traits::disposer disposer; ///< disposer
534 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
536 typedef typename traits::item_counter item_counter; ///< Item counting policy used
537 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
538 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
539 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
540 typedef typename traits::back_off back_off; ///< Back-off strategy
541 typedef typename traits::stat stat; ///< internal statistics type
542 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
543 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
544 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
547 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
549 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
550 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
552 static unsigned int const c_nMaxHeight = std::conditional<
553 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
554 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
555 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
559 static unsigned int const c_nMinHeight = 5;
563 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
564 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
568 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
570 typedef typename std::conditional<
571 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
572 ,intrusive_node_builder
573 ,typename traits::internal_node_builder
574 >::type node_builder;
576 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
578 static void dispose_node( value_type * pVal )
582 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal));
588 void operator()( value_type * pVal )
590 dispose_node( pVal );
594 static void dispose_chain( node_type * pChain )
597 assert( !gc::is_locked());
599 auto f = [&pChain]() -> cds::urcu::retired_ptr {
600 node_type * p = pChain;
602 pChain = p->m_pDelChain;
603 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
605 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
607 gc::batch_retire(std::ref(f));
612 node_type * pPrev[ c_nMaxHeight ];
613 node_type * pSucc[ c_nMaxHeight ];
614 node_type * pNext[ c_nMaxHeight ];
617 node_type * pDelChain;
620 : pDelChain( nullptr )
625 dispose_chain( pDelChain );
628 void dispose( node_type * p )
630 assert( p != nullptr );
631 assert( p->m_pDelChain == nullptr );
633 p->m_pDelChain = pDelChain;
638 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
642 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
644 item_counter m_ItemCounter; ///< item counter
645 random_level_generator m_RandomLevelGen; ///< random level generator instance
646 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
647 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
648 mutable stat m_Stat; ///< internal statistics
652 unsigned int random_level()
654 // Random generator produces a number from range [0..31]
655 // We need a number from range [1..32]
656 return m_RandomLevelGen() + 1;
659 template <typename Q>
660 node_type * build_node( Q v )
662 return node_builder::make_tower( v, m_RandomLevelGen );
667 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
671 struct chain_disposer {
672 void operator()( node_type * pChain ) const
674 dispose_chain( pChain );
677 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
681 /// Result of \p get(), \p get_with() functions - pointer to the node found
682 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
685 /// Default constructor
687 : m_Head( c_nMaxHeight )
688 , m_nHeight( c_nMinHeight )
689 , m_pDeferredDelChain( nullptr )
691 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
693 // Barrier for head node
694 atomics::atomic_thread_fence( memory_model::memory_order_release );
697 /// Clears and destructs the skip-list
704 ///@name Forward iterators (thread-safe under RCU lock)
708 The forward iterator has some features:
709 - it has no post-increment operator
710 - it depends on iterator of underlying \p OrderedList
712 You may safely use iterators in multi-threaded environment only under RCU lock.
713 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
715 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
717 /// Const iterator type
718 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
720 /// Returns a forward iterator addressing the first element in a set
723 return iterator( *m_Head.head());
726 /// Returns a forward const iterator addressing the first element in a set
727 const_iterator begin() const
729 return const_iterator( *m_Head.head());
732 /// Returns a forward const iterator addressing the first element in a set
733 const_iterator cbegin() const
735 return const_iterator( *m_Head.head());
738 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
744 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
745 const_iterator end() const
747 return const_iterator();
750 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
751 const_iterator cend() const
753 return const_iterator();
760 The function inserts \p val in the set if it does not contain
761 an item with key equal to \p val.
763 The function applies RCU lock internally.
765 Returns \p true if \p val is placed into the set, \p false otherwise.
767 bool insert( value_type& val )
769 return insert( val, []( value_type& ) {} );
774 This function is intended for derived non-intrusive containers.
776 The function allows to split creating of new item into two part:
777 - create item with key only
778 - insert new item into the set
779 - if inserting is success, calls \p f functor to initialize value-field of \p val.
781 The functor signature is:
783 void func( value_type& val );
785 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
786 \p val no other changes could be made on this set's item by concurrent threads.
787 The user-defined functor is called only if the inserting is success.
789 RCU \p synchronize method can be called. RCU should not be locked.
791 template <typename Func>
792 bool insert( value_type& val, Func f )
794 check_deadlock_policy::check();
800 node_type * pNode = node_traits::to_node_ptr( val );
801 scoped_node_ptr scp( pNode );
802 unsigned int nHeight = pNode->height();
803 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
804 bool bTowerMade = false;
810 bool bFound = find_position( val, pos, key_comparator(), true );
812 // scoped_node_ptr deletes the node tower if we create it
816 m_Stat.onInsertFailed();
823 nHeight = pNode->height();
828 if ( !insert_at_position( val, pNode, pos, f )) {
829 m_Stat.onInsertRetry();
833 increase_height( nHeight );
835 m_Stat.onAddNode( nHeight );
836 m_Stat.onInsertSuccess();
848 The operation performs inserting or changing data with lock-free manner.
850 If the item \p val is not found in the set, then \p val is inserted into the set
851 iff \p bInsert is \p true.
852 Otherwise, the functor \p func is called with item found.
853 The functor signature is:
855 void func( bool bNew, value_type& item, value_type& val );
858 - \p bNew - \p true if the item has been inserted, \p false otherwise
859 - \p item - item of the set
860 - \p val - argument \p val passed into the \p %update() function
861 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
862 refer to the same thing.
864 The functor can change non-key fields of the \p item; however, \p func must guarantee
865 that during changing no other modifications could be made on this item by concurrent threads.
867 RCU \p synchronize method can be called. RCU should not be locked.
869 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
870 i.e. the node has been inserted or updated,
871 \p second is \p true if new item has been added or \p false if the item with \p key
874 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
876 template <typename Func>
877 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
879 check_deadlock_policy::check();
882 std::pair<bool, bool> bRet( true, false );
885 node_type * pNode = node_traits::to_node_ptr( val );
886 scoped_node_ptr scp( pNode );
887 unsigned int nHeight = pNode->height();
888 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
889 bool bTowerMade = false;
894 bool bFound = find_position( val, pos, key_comparator(), true );
896 // scoped_node_ptr deletes the node tower if we create it before
900 func( false, *node_traits::to_value_ptr(pos.pCur), val );
901 m_Stat.onUpdateExist();
913 nHeight = pNode->height();
918 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
919 m_Stat.onInsertRetry();
923 increase_height( nHeight );
926 m_Stat.onAddNode( nHeight );
927 m_Stat.onUpdateNew();
936 template <typename Func>
937 CDS_DEPRECATED("ensure() is deprecated, use update()")
938 std::pair<bool, bool> ensure( value_type& val, Func func )
940 return update( val, func, true );
944 /// Unlinks the item \p val from the set
946 The function searches the item \p val in the set and unlink it from the set
947 if it is found and is equal to \p val.
949 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
950 and deletes the item found. \p %unlink() searches an item by key and deletes it
951 only if \p val is an item of that set, i.e. the pointer to item found
952 is equal to <tt> &val </tt>.
954 RCU \p synchronize method can be called. RCU should not be locked.
956 The \ref disposer specified in \p Traits class template parameter is called
957 by garbage collector \p GC asynchronously.
959 The function returns \p true if success and \p false otherwise.
961 bool unlink( value_type& val )
963 check_deadlock_policy::check();
971 if ( !find_position( val, pos, key_comparator(), false )) {
972 m_Stat.onUnlinkFailed();
976 node_type * pDel = pos.pCur;
977 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
979 unsigned int nHeight = pDel->height();
981 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
983 m_Stat.onRemoveNode( nHeight );
984 m_Stat.onUnlinkSuccess();
988 m_Stat.onUnlinkFailed();
997 /// Extracts the item from the set with specified \p key
998 /** \anchor cds_intrusive_SkipListSet_rcu_extract
999 The function searches an item with key equal to \p key in the set,
1000 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1001 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1003 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1005 RCU \p synchronize method can be called. RCU should NOT be locked.
1006 The function does not call the disposer for the item found.
1007 The disposer will be implicitly invoked when the returned object is destroyed or when
1008 its \p release() member function is called.
1011 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1015 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1020 // Dispose returned item.
1025 template <typename Q>
1026 exempt_ptr extract( Q const& key )
1028 return exempt_ptr( do_extract( key ));
1031 /// Extracts the item from the set with comparing functor \p pred
1033 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1034 \p Less has the interface like \p std::less.
1035 \p pred must imply the same element order as the comparator used for building the set.
1037 template <typename Q, typename Less>
1038 exempt_ptr extract_with( Q const& key, Less pred )
1040 return exempt_ptr( do_extract_with( key, pred ));
1043 /// Extracts an item with minimal key from the list
1045 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1046 If the skip-list is empty the function returns an empty \p exempt_ptr.
1048 RCU \p synchronize method can be called. RCU should NOT be locked.
1049 The function does not call the disposer for the item found.
1050 The disposer will be implicitly invoked when the returned object is destroyed or when
1051 its \p release() member function is manually called.
1054 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1058 typename skip_list::exempt_ptr ep(theList.extract_min());
1063 // Dispose returned item.
1068 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1069 It means that the function gets leftmost item and tries to unlink it.
1070 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1071 So, the function returns the item with minimum key at the moment of list traversing.
1073 exempt_ptr extract_min()
1075 return exempt_ptr( do_extract_min());
1078 /// Extracts an item with maximal key from the list
1080 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1081 If the skip-list is empty the function returns an empty \p exempt_ptr.
1083 RCU \p synchronize method can be called. RCU should NOT be locked.
1084 The function does not call the disposer for the item found.
1085 The disposer will be implicitly invoked when the returned object is destroyed or when
1086 its \p release() member function is manually called.
1089 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1093 typename skip_list::exempt_ptr ep( theList.extract_max());
1097 // Dispose returned item.
1102 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1103 It means that the function gets rightmost item and tries to unlink it.
1104 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1105 So, the function returns the item with maximum key at the moment of list traversing.
1107 exempt_ptr extract_max()
1109 return exempt_ptr( do_extract_max());
1112 /// Deletes the item from the set
1113 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1114 The function searches an item with key equal to \p key in the set,
1115 unlinks it from the set, and returns \p true.
1116 If the item with key equal to \p key is not found the function return \p false.
1118 Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
1120 RCU \p synchronize method can be called. RCU should not be locked.
1122 template <typename Q>
1123 bool erase( const Q& key )
1125 return do_erase( key, key_comparator(), [](value_type const&) {} );
1128 /// Delete the item from the set with comparing functor \p pred
1130 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1131 but \p pred predicate is used for key comparing.
1132 \p Less has the interface like \p std::less.
1133 \p pred must imply the same element order as the comparator used for building the set.
1135 template <typename Q, typename Less>
1136 bool erase_with( const Q& key, Less pred )
1139 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1142 /// Deletes the item from the set
1143 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1144 The function searches an item with key equal to \p key in the set,
1145 call \p f functor with item found, unlinks it from the set, and returns \p true.
1146 The \ref disposer specified in \p Traits class template parameter is called
1147 by garbage collector \p GC asynchronously.
1149 The \p Func interface is
1152 void operator()( value_type const& item );
1155 If the item with key equal to \p key is not found the function return \p false.
1157 Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
1159 RCU \p synchronize method can be called. RCU should not be locked.
1161 template <typename Q, typename Func>
1162 bool erase( Q const& key, Func f )
1164 return do_erase( key, key_comparator(), f );
1167 /// Delete the item from the set with comparing functor \p pred
1169 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1170 but \p pred predicate is used for key comparing.
1171 \p Less has the interface like \p std::less.
1172 \p pred must imply the same element order as the comparator used for building the set.
1174 template <typename Q, typename Less, typename Func>
1175 bool erase_with( Q const& key, Less pred, Func f )
// Same as erase(key, f) but the comparator is synthesized from Less; pred fixes the type only.
1178 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1182 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1183 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1184 The interface of \p Func functor is:
1187 void operator()( value_type& item, Q& key );
1190 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1192 The functor can change non-key fields of \p item. Note that the functor is only guaranteed
1193 that \p item cannot be disposed while the functor is executing.
1194 The functor does not serialize simultaneous access to the set \p item. If such access is
1195 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1197 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1198 can modify both arguments.
1200 The function applies RCU lock internally.
1202 The function returns \p true if \p key is found, \p false otherwise.
1204 template <typename Q, typename Func>
1205 bool find( Q& key, Func f )
1207 return do_find_with( key, key_comparator(), f );
// Overload for const keys; the functor then receives the key as Q const&.
1210 template <typename Q, typename Func>
1211 bool find( Q const& key, Func f )
1213 return do_find_with( key, key_comparator(), f );
1217 /// Finds the key \p key with comparing functor \p pred
1219 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1220 but \p pred is used for key comparison.
1221 \p Less functor has the interface like \p std::less.
1222 \p pred must imply the same element order as the comparator used for building the set.
1224 template <typename Q, typename Less, typename Func>
1225 bool find_with( Q& key, Less pred, Func f )
// pred fixes the Less type; an ordering comparator is synthesized from it.
1228 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
// Overload for const keys.
1231 template <typename Q, typename Less, typename Func>
1232 bool find_with( Q const& key, Less pred, Func f )
1235 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1239 /// Checks whether the set contains \p key
1241 The function searches the item with key equal to \p key
1242 and returns \p true if it is found, and \p false otherwise.
1244 The function applies RCU lock internally.
1246 template <typename Q>
1247 bool contains( Q const& key )
// Reuses the find machinery with an empty functor; only the boolean result matters.
1249 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
// Deprecated alias kept for backward compatibility; forwards to contains().
1252 template <typename Q>
1253 CDS_DEPRECATED("deprecated, use contains()")
1254 bool find( Q const& key )
1256 return contains( key );
1260 /// Checks whether the set contains \p key using \p pred predicate for searching
1262 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1263 \p Less functor has the interface like \p std::less.
1264 \p Less must imply the same element order as the comparator used for building the set.
1266 template <typename Q, typename Less>
1267 bool contains( Q const& key, Less pred )
// Empty functor: only existence is reported; pred fixes the Less type for the adapter.
1270 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
// Deprecated alias kept for backward compatibility; forwards to contains( key, pred ).
1273 template <typename Q, typename Less>
1274 CDS_DEPRECATED("deprecated, use contains()")
1275 bool find_with( Q const& key, Less pred )
1277 return contains( key, pred );
1281 /// Finds \p key and return the item found
1282 /** \anchor cds_intrusive_SkipListSet_rcu_get
1283 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1284 If \p key is not found it returns empty \p raw_ptr.
1286 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1288 RCU should be locked before call of this function.
1289 Returned item is valid only while RCU is locked:
1291 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1294 typename skip_list::raw_ptr pVal;
1297 skip_list::rcu_lock lock;
1299 pVal = theList.get( 5 );
1305 // You can manually release pVal after RCU-locked section
1309 template <typename Q>
1310 raw_ptr get( Q const& key )
// Precondition: caller holds the RCU read lock for the whole lifetime of the result.
1312 assert( gc::is_locked());
// NOTE(review): pFound is not zero-initialized here (get_with() below does initialize it);
// it is only read on the success branch where the lambda has assigned it — benign, but worth confirming.
1315 value_type * pFound;
1316 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1317 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: empty raw_ptr that still carries the disposer for nodes collected in pos during the search.
1318 return raw_ptr( raw_ptr_disposer( pos ));
1321 /// Finds \p key and return the item found
1323 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1324 but \p pred is used for comparing the keys.
1326 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1328 \p pred must imply the same element order as the comparator used for building the set.
1330 template <typename Q, typename Less>
1331 raw_ptr get_with( Q const& key, Less pred )
// Precondition: caller holds the RCU read lock; the returned raw_ptr is valid only inside that critical section.
1334 assert( gc::is_locked());
1336 value_type * pFound = nullptr;
1338 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1339 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1341 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: empty raw_ptr carrying the disposer for nodes collected during the search.
1343 return raw_ptr( raw_ptr_disposer( pos ));
1346 /// Returns item count in the set
1348 The value returned depends on item counter type provided by \p Traits template parameter.
1349 For \p atomicity::empty_item_counter the function always returns 0.
1350 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1351 member function for this purpose.
1355 return m_ItemCounter;
1358 /// Checks if the set is empty
// Empty iff the head tower's level-0 successor is null. NOTE(review): relaxed load —
// under concurrent modification the result is only a snapshot hint.
1361 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1364 /// Clears the set (not atomic)
1366 The function unlinks all items from the set.
1367 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1371 assert( set.empty());
1373 the assertion could be raised.
1375 For each item the \p disposer will be called automatically after unlinking.
// Drains the set by repeatedly extracting the minimum element until extraction fails.
1380 while ( (ep = extract_min()));
1383 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1384 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
1386 return c_nMaxHeight;
1389 /// Returns const reference to internal statistics
1390 stat const& statistics() const
// Internal helper: bit 1 (mask 2) of a marked pointer flags removal-by-extract,
// distinguishing it from plain erase (mask 1); see nMask in try_remove_at().
1398 bool is_extracted( marked_node_ptr const p ) const
1400 return ( p.bits() & 2 ) != 0;
// Helps finish physical removal of a logically-deleted node at one level:
// CAS pPred->next[nLevel] from pCur to pSucc's pointer (dropping the mark bits).
// If level_unlinked() then reports the node fully unlinked, an erased (not
// extracted) node is chained into pos for deferred disposal — it cannot be
// freed immediately because RCU is locked here.
1403 void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
1405 marked_node_ptr p( pCur.ptr() );
1406 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
1407 memory_model::memory_order_release, atomics::memory_order_relaxed ) )
1409 if ( pCur->level_unlinked()) {
1410 if ( !is_extracted( pSucc ) ) {
1411 // We cannot free the node at this moment because RCU is locked
1412 // Link deleted nodes to a chain to free later
1413 pos.dispose( pCur.ptr() );
1414 m_Stat.onEraseWhileFind();
1417 m_Stat.onExtractWhileFind();
// Core search: locates val's position at every level, filling pos.pPrev[] /
// pos.pSucc[] with the bracketing nodes and pos.pCur with the equal node (if any).
// Helps physically remove marked nodes encountered on the way (via help_remove).
// Returns true iff a node comparing equal to val was found.
// NOTE(review): this view of the source is elided — retry branches / goto
// targets between the numbered lines are not visible here.
1422 template <typename Q, typename Compare >
1423 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
// Precondition: RCU read lock must be held by the caller.
1425 assert( gc::is_locked() );
1428 marked_node_ptr pSucc;
1429 marked_node_ptr pCur;
// Search restarts from the head tower, scanning from the top level down.
1433 pPred = m_Head.head();
1435 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1438 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1439 if ( pCur.bits() ) {
1440 // pCur.bits() means that pPred is logically deleted
1444 if ( pCur.ptr() == nullptr ) {
1445 // end of the list at level nLevel - goto next level
1449 // pSucc contains deletion mark for pCur
1450 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-validate the link: if pPred->next changed since we read pCur, retry.
1452 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
1455 if ( pSucc.bits() ) {
1456 // pCur is marked, i.e. logically deleted.
1457 help_remove( nLevel, pPred, pCur, pSucc, pos );
1461 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1464 else if ( nCmp == 0 && bStopIfFound )
// Record the bracketing pair for this level before descending.
1472 pos.pPrev[nLevel] = pPred;
1473 pos.pSucc[nLevel] = pCur.ptr();
1480 pos.pCur = pCur.ptr();
1481 return pCur.ptr() && nCmp == 0;
// Locates the leftmost (minimum) node: walks straight down the head tower,
// helping to remove marked nodes, and fills pos for a subsequent try_remove_at().
// Returns false when the list is empty at level 0.
1484 bool find_min_position( position& pos )
// Precondition: RCU read lock must be held by the caller.
1486 assert( gc::is_locked() );
1489 marked_node_ptr pSucc;
1490 marked_node_ptr pCur;
1493 pPred = m_Head.head();
1495 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1497 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1498 // pCur.bits() means that pPred is logically deleted
1499 // head cannot be deleted
1500 assert( pCur.bits() == 0 );
1504 // pSucc contains deletion mark for pCur
1505 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-validate the link: if head->next changed since we read pCur, retry.
1507 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
1510 if ( pSucc.bits() ) {
1511 // pCur is marked, i.e. logically deleted.
1512 help_remove( nLevel, pPred, pCur, pSucc, pos );
// pPred is always the head here: the minimum is the head's direct successor at every level.
1518 pos.pPrev[nLevel] = pPred;
1519 pos.pSucc[nLevel] = pCur.ptr();
1521 return ( pos.pCur = pCur.ptr() ) != nullptr;
// Locates the rightmost (maximum) node: at each level walks to the last
// unmarked node, helping to remove marked ones, and fills pos for removal.
// Returns false when the list is empty.
1524 bool find_max_position( position& pos )
// Precondition: RCU read lock must be held by the caller.
1526 assert( gc::is_locked() );
1529 marked_node_ptr pSucc;
1530 marked_node_ptr pCur;
1533 pPred = m_Head.head();
1535 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1538 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1539 if ( pCur.bits() ) {
1540 // pCur.bits() means that pPred is logically deleted
1544 if ( pCur.ptr() == nullptr ) {
1545 // end of the list at level nLevel - goto next level
1549 // pSucc contains deletion mark for pCur
1550 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-validate the link: if pPred->next changed since we read pCur, retry.
1552 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
1555 if ( pSucc.bits() ) {
1556 // pCur is marked, i.e. logically deleted.
1557 help_remove( nLevel, pPred, pCur, pSucc, pos );
// Record the bracketing pair for this level before descending.
1569 pos.pPrev[nLevel] = pPred;
1570 pos.pSucc[nLevel] = pCur.ptr();
1573 return ( pos.pCur = pCur.ptr() ) != nullptr;
// Links pNode into the list at the position previously computed in pos:
// first CAS at level 0 (the linearization point of insert), then link the
// upper levels 1..height-1. If a concurrent remover marks pNode mid-insert,
// the insert backs out; if an upper-level link races, the position is renewed
// via find_position() and the level is retried.
1576 template <typename Func>
1577 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
// Precondition: RCU read lock must be held by the caller.
1579 assert( gc::is_locked() );
1581 unsigned int const nHeight = pNode->height();
// Reset all tower links before publishing the node.
1582 pNode->clear_tower();
1584 // Insert at level 0
1586 marked_node_ptr p( pos.pSucc[0] );
1587 pNode->next( 0 ).store( p, memory_model::memory_order_relaxed );
// Level-0 CAS: success makes the node reachable (insert takes effect here).
1588 if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
1595 // Insert at level 1..max
1596 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
1599 marked_node_ptr pSucc( pos.pSucc[nLevel] );
1602 // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now
1603 if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
1604 memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
1606 // pNode has been marked as removed while we are inserting it
1608 assert( p.bits() != 0 );
// Mark the remaining levels unlinked; bits()==1 means plain erase, so we may dispose.
1610 if ( pNode->level_unlinked( nHeight - nLevel ) && p.bits() == 1 ) {
1611 pos.dispose( pNode );
1612 m_Stat.onEraseWhileInsert();
1615 m_Stat.onLogicDeleteWhileInsert();
1621 // Link pNode into the list at nLevel
1622 if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ),
1623 memory_model::memory_order_release, atomics::memory_order_relaxed ))
1629 // Renew insert position
1630 m_Stat.onRenewInsertPosition();
1631 if ( !find_position( val, pos, key_comparator(), false ) ) {
1632 // The node has been deleted while we are inserting it
1633 m_Stat.onNotFoundWhileInsert();
// Two-phase removal of pDel found at pos:
//  1) logical deletion — mark every tower link from the top down; the mask is
//     1 for erase, 3 for extract (bit 2 = "extracted", see is_extracted()).
//     The level-0 mark CAS is the decision point: the thread that wins it owns
//     the node, invokes f on the value, and proceeds to physical unlinking.
//  2) physical deletion — CAS each pos.pPrev[nLevel]->next past pDel; on
//     contention fall back to a full find_position() which helps complete it.
// Erased nodes are chained into pos for deferred disposal (RCU is locked).
1641 template <typename Func>
1642 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
1644 assert( pDel != nullptr );
1645 assert( gc::is_locked() );
1647 marked_node_ptr pSucc;
// Mark value: 1 = erase, 3 = erase + extracted flag.
1650 unsigned const nMask = bExtract ? 3u : 1u;
1652 // logical deletion (marking)
1653 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
1654 pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
1655 if ( pSucc.bits() == 0 ) {
1657 while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask,
1658 memory_model::memory_order_release, atomics::memory_order_acquire ))
1660 if ( pSucc.bits() == 0 ) {
1662 m_Stat.onMarkFailed();
// A different mask means another thread is removing pDel with the other intent.
1664 else if ( pSucc.bits() != nMask )
1670 marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() );
// Level-0 marking CAS — the winner owns the deletion and calls f below.
1672 if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire ))
1674 f( *node_traits::to_value_ptr( pDel ) );
1676 // physical deletion
1679 unsigned nCount = 0;
1680 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
1682 pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
1683 if ( !pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
1684 memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
// Unlink CAS failed: a helper beat us on some levels; account and maybe dispose.
1688 if ( pDel->level_unlinked( nCount )) {
1689 if ( p.bits() == 1 ) {
1690 pos.dispose( pDel );
1691 m_Stat.onFastEraseHelped();
1694 m_Stat.onFastExtractHelped();
// Slow path: let find_position() finish the physical removal by helping.
1700 find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
1702 m_Stat.onSlowExtract();
1704 m_Stat.onSlowErase();
1712 // We cannot free the node at this moment since RCU is locked
1713 // Link deleted nodes to a chain to free later
1714 pos.dispose( pDel );
1715 m_Stat.onFastErase();
1718 m_Stat.onFastExtract();
1721 else if ( p.bits() ) {
1722 // Another thread is deleting pDel right now
1726 m_Stat.onEraseRetry();
// NOTE(review): "finsd_fastpath_result" looks like a typo for
// "find_fastpath_result"; the name is used consistently below, so a rename
// would have to touch every use site at once. The third enumerator
// (find_fastpath_abort, used below) is elided from this view of the source.
1731 enum finsd_fastpath_result {
1732 find_fastpath_found,
1733 find_fastpath_not_found,
// Wait-free first-attempt search: no helping, no CAS. Descends from the
// current height, skipping right while keys compare less. Gives up after a
// few encounters with marked (being-removed) nodes so the caller can fall
// back to the helping slow path.
1736 template <typename Q, typename Compare, typename Func>
1737 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1740 marked_node_ptr pCur;
1741 marked_node_ptr pSucc;
// pNull is default-constructed and serves as the end-of-level sentinel.
1742 marked_node_ptr pNull;
1745 unsigned attempt = 0;
1748 pPred = m_Head.head();
// Start from the list's current height hint, not c_nMaxHeight.
1749 for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
1750 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1751 if ( pCur == pNull )
1754 while ( pCur != pNull ) {
1755 if ( pCur.bits() ) {
1756 // pPrev is being removed
// Tolerate a few marked nodes before giving up on the fast path.
1757 if ( ++attempt < 4 ) {
1762 return find_fastpath_abort;
1766 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
// cur < val: keep moving right at this level.
1769 pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
1771 else if ( nCmp == 0 ) {
1773 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1774 return find_fastpath_found;
1776 else // pCur > val - go down
1782 return find_fastpath_not_found;
// Slow path: full helping search via find_position(); invokes f on the match.
1785 template <typename Q, typename Compare, typename Func>
1786 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1788 if ( find_position( val, pos, cmp, true ) ) {
1789 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1791 f( *node_traits::to_value_ptr( pos.pCur ), val );
// Convenience overload that owns its own position object (its local
// declaration is elided in this view of the source).
1798 template <typename Q, typename Compare, typename Func>
1799 bool do_find_with( Q& val, Compare cmp, Func f )
1802 return do_find_with( val, cmp, f, pos );
// Find driver: tries the wait-free fast path first; on abort (too much
// contention with removers) falls through to the helping slow path.
// Updates the per-path hit/miss statistics.
1805 template <typename Q, typename Compare, typename Func>
1806 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1813 switch ( find_fastpath( val, cmp, f ) ) {
1814 case find_fastpath_found:
1815 m_Stat.onFindFastSuccess();
1817 case find_fastpath_not_found:
1818 m_Stat.onFindFastFailed();
// find_fastpath_abort falls through to the slow path below.
1824 if ( find_slowpath( val, cmp, f, pos ) ) {
1825 m_Stat.onFindSlowSuccess();
1829 m_Stat.onFindSlowFailed();
// Core erase: checks the RCU deadlock policy, finds the exact node, then runs
// the two-phase removal via try_remove_at() (bExtract = false, so the disposer
// path applies). Presumably the RCU read lock is taken internally between the
// check and the search — that region is elided in this view of the source.
1836 template <typename Q, typename Compare, typename Func>
1837 bool do_erase( Q const& val, Compare cmp, Func f )
1839 check_deadlock_policy::check();
1847 if ( !find_position( val, pos, cmp, false ) ) {
1848 m_Stat.onEraseFailed();
1852 node_type * pDel = pos.pCur;
1853 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
// Record height before removal for the statistics call below.
1855 unsigned int nHeight = pDel->height();
1856 if ( try_remove_at( pDel, pos, f, false ) ) {
1858 m_Stat.onRemoveNode( nHeight );
1859 m_Stat.onEraseSuccess();
1863 m_Stat.onEraseFailed();
// Core extract-by-key: finds the exact node and removes it with
// bExtract = true (mark 3), so the disposer is NOT run — ownership of the
// value passes to the caller, which receives it as a raw value_type*.
1872 template <typename Q, typename Compare>
1873 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1875 // RCU should be locked!!!
1876 assert( gc::is_locked() );
1880 if ( !find_position( key, pos, cmp, false ) ) {
1881 m_Stat.onExtractFailed();
1886 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
// Record height before removal for the statistics call below.
1888 unsigned int const nHeight = pDel->height();
1890 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1892 m_Stat.onRemoveNode( nHeight );
1893 m_Stat.onExtractSuccess();
1896 m_Stat.onExtractFailed();
1901 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extract with the default comparator. Checks the deadlock policy first;
// the RCU lock scope around do_extract_key() is elided in this view.
1904 template <typename Q>
1905 value_type * do_extract( Q const& key )
1907 check_deadlock_policy::check();
1908 value_type * pDel = nullptr;
1912 pDel = do_extract_key( key, key_comparator(), pos );
// Variant taking a Less predicate; pred only fixes the comparator type.
1918 template <typename Q, typename Less>
1919 value_type * do_extract_with( Q const& key, Less pred )
1922 check_deadlock_policy::check();
1923 value_type * pDel = nullptr;
1927 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// Extracts the minimum element: finds the leftmost node and removes it with
// bExtract = true (no disposer; ownership passes to the caller).
// Must be called with RCU unlocked — the lock scope is elided in this view.
1933 value_type * do_extract_min()
1935 assert( !gc::is_locked() );
1943 if ( !find_min_position( pos ) ) {
1944 m_Stat.onExtractMinFailed();
// Record height before removal for the statistics call below.
1949 unsigned int const nHeight = pDel->height();
1951 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1953 m_Stat.onRemoveNode( nHeight );
1954 m_Stat.onExtractMinSuccess();
1957 m_Stat.onExtractMinFailed();
1963 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extracts the maximum element: mirror image of do_extract_min() using
// find_max_position(). Must be called with RCU unlocked.
1966 value_type * do_extract_max()
1968 assert( !gc::is_locked() );
1976 if ( !find_max_position( pos ) ) {
1977 m_Stat.onExtractMaxFailed();
// Record height before removal for the statistics call below.
1982 unsigned int const nHeight = pDel->height();
1984 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1986 m_Stat.onRemoveNode( nHeight );
1987 m_Stat.onExtractMaxSuccess();
1990 m_Stat.onExtractMaxFailed();
1996 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Raises the list's current-height hint up to nHeight. Monotonic: a single
// CAS, and failure is not retried — if it fails, another thread has already
// changed m_nHeight concurrently.
1999 void increase_height( unsigned int nHeight )
2001 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
2002 if ( nCur < nHeight )
2003 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
// Teardown loop (enclosing function signature is elided in this view):
// walks the level-0 chain and disposes every node. The relaxed loads suggest
// this runs without concurrent access — presumably from the destructor; confirm.
2008 node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
2010 node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
2011 dispose_node( node_traits::to_value_ptr( p ) );
2019 }} // namespace cds::intrusive
2022 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H