/*
    This file is a part of libcds - Concurrent Data Structures library
    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
34 #include <type_traits>
36 #include <cds/intrusive/details/skip_list_base.h>
37 #include <cds/opt/compare.h>
38 #include <cds/urcu/details/check_deadlock.h>
39 #include <cds/details/binary_functor_wrapper.h>
40 #include <cds/urcu/exempt_ptr.h>
41 #include <cds/urcu/raw_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
44 namespace cds { namespace intrusive {
49 template <class RCU, typename Tag>
50 class node< cds::urcu::gc< RCU >, Tag >
53 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
54 typedef Tag tag ; ///< tag
57 // bit 0 - the item is logically deleted
58 // bit 1 - the item is extracted (only for level 0)
59 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
60 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
61 typedef atomic_marked_ptr tower_item_type;
64 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
66 node * m_pDelChain; ///< Deleted node chain (local for a thread)
68 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
69 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
70 atomics::atomic<unsigned> m_nUnlink; ///< Unlink helper
73 /// Constructs a node of height 1 (a bottom-list node)
76 , m_pDelChain( nullptr )
78 , m_arrNext( nullptr )
80 m_nUnlink.store( 1, atomics::memory_order_release );
83 /// Constructs a node of height \p nHeight
84 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
86 assert( nHeight > 0 );
87 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
88 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
91 m_arrNext = nextTower;
93 m_nUnlink.store( nHeight, atomics::memory_order_release );
96 atomic_marked_ptr * release_tower()
98 atomic_marked_ptr * pTower = m_arrNext;
104 atomic_marked_ptr * get_tower() const
111 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
112 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
115 /// Access to element of next pointer array
116 atomic_marked_ptr& next( unsigned int nLevel )
118 assert( nLevel < height());
119 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr));
121 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
124 /// Access to element of next pointer array (const version)
125 atomic_marked_ptr const& next( unsigned int nLevel ) const
127 assert( nLevel < height());
128 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
130 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
133 /// Access to element of next pointer array (same as \ref next function)
134 atomic_marked_ptr& operator[]( unsigned int nLevel )
136 return next( nLevel );
139 /// Access to element of next pointer array (same as \ref next function)
140 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
142 return next( nLevel );
145 /// Height of the node
146 unsigned int height() const
151 /// Clears internal links
154 assert( m_arrNext == nullptr );
155 m_pNext.store( marked_ptr(), atomics::memory_order_release );
156 m_pDelChain = nullptr;
159 bool is_cleared() const
161 return m_pNext == atomic_marked_ptr()
162 && m_arrNext == nullptr
166 bool level_unlinked( unsigned nCount = 1 )
168 return m_nUnlink.fetch_sub( nCount, std::memory_order_relaxed ) == 1;
171 bool is_upper_level( unsigned nLevel ) const
173 return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1;
176 } // namespace skip_list
180 namespace skip_list { namespace details {
182 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
183 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
186 typedef cds::urcu::gc< RCU > gc;
187 typedef NodeTraits node_traits;
188 typedef BackOff back_off;
189 typedef typename node_traits::node_type node_type;
190 typedef typename node_traits::value_type value_type;
191 static bool const c_isConst = IsConst;
193 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
196 typedef typename node_type::marked_ptr marked_ptr;
197 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
207 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) {
208 // Current node is marked as deleted. So, its next pointer can point to anything
209 // In this case we interrupt our iteration and returns end() iterator.
214 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
215 node_type * pp = p.ptr();
217 // p is marked as deleted. Spin waiting for physical removal
221 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) {
222 // p is marked as deleted. Spin waiting for physical removal
232 public: // for internal use only!!!
233 iterator( node_type& refHead )
239 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
245 node_type * pp = p.ptr();
246 // Logically deleted node is marked from highest level
247 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) {
261 iterator( iterator const& s)
262 : m_pNode( s.m_pNode )
265 value_type * operator ->() const
267 assert( m_pNode != nullptr );
268 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
270 return node_traits::to_value_ptr( m_pNode );
273 value_ref operator *() const
275 assert( m_pNode != nullptr );
276 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
278 return *node_traits::to_value_ptr( m_pNode );
282 iterator& operator ++()
288 iterator& operator = (const iterator& src)
290 m_pNode = src.m_pNode;
294 template <typename Bkoff, bool C>
295 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
297 return m_pNode == i.m_pNode;
299 template <typename Bkoff, bool C>
300 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
302 return !( *this == i );
305 }} // namespace skip_list::details
308 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
309 /** @ingroup cds_intrusive_map
310 @anchor cds_intrusive_SkipListSet_rcu
312 The implementation of well-known probabilistic data structure called skip-list
313 invented by W.Pugh in his papers:
314 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
315 - [1990] W.Pugh A Skip List Cookbook
317 A skip-list is a probabilistic data structure that provides expected logarithmic
318 time search without the need of rebalance. The skip-list is a collection of sorted
319 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
320 Each list has a level, ranging from 0 to 32. The bottom-level list contains
321 all the nodes, and each higher-level list is a sublist of the lower-level lists.
322 Each node is created with a random top level (with a random height), and belongs
323 to all lists up to that level. The probability that a node has the height 1 is 1/2.
324 The probability that a node has the height N is 1/2 ** N (more precisely,
325 the distribution depends on an random generator provided, but our generators
328 The lock-free variant of skip-list is implemented according to book
329 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
330 chapter 14.4 "A Lock-Free Concurrent Skiplist".
332 <b>Template arguments</b>:
333 - \p RCU - one of \ref cds_urcu_gc "RCU type"
334 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
335 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
336 - \p Traits - set traits, default is \p skip_list::traits
337 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
338 instead of \p Traits template argument.
340 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
341 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
345 The class supports a forward iterator (\ref iterator and \ref const_iterator).
346 The iteration is ordered.
348 You may iterate over skip-list set items only under RCU lock.
349 Only in this case the iterator is thread-safe since
350 while RCU is locked any set's item cannot be reclaimed.
352 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
353 (i.e. inserting, erasing and so on) is not possible.
355 @warning The iterator object cannot be passed between threads.
357 Example how to use skip-list set iterators:
359 // First, you should include the header for RCU type you have chosen
360 #include <cds/urcu/general_buffered.h>
361 #include <cds/intrusive/skip_list_rcu.h>
363 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
369 // Traits for your skip-list.
370 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
371 struct my_traits: public cds::intrusive::skip_list::traits
375 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
377 my_skiplist_set theSet;
383 // Apply RCU locking manually
384 typename rcu_type::scoped_lock sl;
386 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
390 // rcu_type::scoped_lock destructor releases RCU lock implicitly
394 The iterator class supports the following minimalistic interface:
401 iterator( iterator const& s);
403 value_type * operator ->() const;
404 value_type& operator *() const;
407 iterator& operator ++();
410 iterator& operator = (const iterator& src);
412 bool operator ==(iterator const& i ) const;
413 bool operator !=(iterator const& i ) const;
416 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
420 You should incorporate skip_list::node into your struct \p T and provide
421 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
422 define a struct based on \p skip_list::traits.
424 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
426 // First, you should include the header for RCU type you have chosen
427 #include <cds/urcu/general_buffered.h>
429 // Include RCU skip-list specialization
430 #include <cds/intrusive/skip_list_rcu.h>
433 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
435 // Data stored in skip list
436 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
445 // my_data compare functor
447 int operator()( const my_data& d1, const my_data& d2 )
449 return d1.strKey.compare( d2.strKey );
452 int operator()( const my_data& d, const std::string& s )
454 return d.strKey.compare(s);
457 int operator()( const std::string& s, const my_data& d )
459 return s.compare( d.strKey );
464 struct my_traits: public cds::intrusive::skip_list::traits
466 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
467 typedef my_data_cmp compare;
470 // Declare skip-list set type
471 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
474 Equivalent option-based code:
476 #include <cds/urcu/general_buffered.h>
477 #include <cds/intrusive/skip_list_rcu.h>
479 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
488 // Declare option-based skip-list set
489 typedef cds::intrusive::SkipListSet< rcu_type
491 , typename cds::intrusive::skip_list::make_traits<
492 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
493 ,cds::intrusive::opt::compare< my_data_cmp >
502 #ifdef CDS_DOXYGEN_INVOKED
503 ,typename Traits = skip_list::traits
508 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
511 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
512 typedef T value_type; ///< type of value stored in the skip-list
513 typedef Traits traits; ///< Traits template parameter
515 typedef typename traits::hook hook; ///< hook type
516 typedef typename hook::node_type node_type; ///< node type
518 # ifdef CDS_DOXYGEN_INVOKED
519 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
521 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
524 typedef typename traits::disposer disposer; ///< disposer
525 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
527 typedef typename traits::item_counter item_counter; ///< Item counting policy used
528 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
529 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
530 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
531 typedef typename traits::back_off back_off; ///< Back-off strategy
532 typedef typename traits::stat stat; ///< internal statistics type
533 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
534 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
535 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
538 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
540 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
541 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
543 static unsigned int const c_nMaxHeight = std::conditional<
544 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
545 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
546 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
550 static unsigned int const c_nMinHeight = 5;
554 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
555 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
559 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
561 typedef typename std::conditional<
562 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
563 ,intrusive_node_builder
564 ,typename traits::internal_node_builder
565 >::type node_builder;
567 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
569 static void dispose_node( value_type * pVal )
573 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal));
579 void operator()( value_type * pVal )
581 dispose_node( pVal );
585 static void dispose_chain( node_type * pChain )
588 assert( !gc::is_locked());
590 auto f = [&pChain]() -> cds::urcu::retired_ptr {
591 node_type * p = pChain;
593 pChain = p->m_pDelChain;
594 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
596 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
598 gc::batch_retire(std::ref(f));
603 node_type * pPrev[ c_nMaxHeight ];
604 node_type * pSucc[ c_nMaxHeight ];
605 node_type * pNext[ c_nMaxHeight ];
608 node_type * pDelChain;
611 : pDelChain( nullptr )
616 dispose_chain( pDelChain );
619 void dispose( node_type * p )
621 assert( p != nullptr );
622 assert( p->m_pDelChain == nullptr );
624 p->m_pDelChain = pDelChain;
629 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
633 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
635 item_counter m_ItemCounter; ///< item counter
636 random_level_generator m_RandomLevelGen; ///< random level generator instance
637 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
638 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
639 mutable stat m_Stat; ///< internal statistics
643 unsigned int random_level()
645 // Random generator produces a number from range [0..31]
646 // We need a number from range [1..32]
647 return m_RandomLevelGen() + 1;
650 template <typename Q>
651 node_type * build_node( Q v )
653 return node_builder::make_tower( v, m_RandomLevelGen );
658 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
662 struct chain_disposer {
663 void operator()( node_type * pChain ) const
665 dispose_chain( pChain );
668 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
672 /// Result of \p get(), \p get_with() functions - pointer to the node found
673 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
676 /// Default constructor
678 : m_Head( c_nMaxHeight )
679 , m_nHeight( c_nMinHeight )
680 , m_pDeferredDelChain( nullptr )
682 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
684 // Barrier for head node
685 atomics::atomic_thread_fence( memory_model::memory_order_release );
688 /// Clears and destructs the skip-list
695 ///@name Forward iterators (thread-safe under RCU lock)
699 The forward iterator has some features:
700 - it has no post-increment operator
701 - it depends on iterator of underlying \p OrderedList
703 You may safely use iterators in multi-threaded environment only under RCU lock.
704 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
706 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
708 /// Const iterator type
709 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
711 /// Returns a forward iterator addressing the first element in a set
714 return iterator( *m_Head.head());
717 /// Returns a forward const iterator addressing the first element in a set
718 const_iterator begin() const
720 return const_iterator( *m_Head.head());
723 /// Returns a forward const iterator addressing the first element in a set
724 const_iterator cbegin() const
726 return const_iterator( *m_Head.head());
729 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
735 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
736 const_iterator end() const
738 return const_iterator();
741 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
742 const_iterator cend() const
744 return const_iterator();
751 The function inserts \p val in the set if it does not contain
752 an item with key equal to \p val.
754 The function applies RCU lock internally.
756 Returns \p true if \p val is placed into the set, \p false otherwise.
758 bool insert( value_type& val )
760 return insert( val, []( value_type& ) {} );
765 This function is intended for derived non-intrusive containers.
767 The function allows to split creating of new item into two part:
768 - create item with key only
769 - insert new item into the set
770 - if inserting is success, calls \p f functor to initialize value-field of \p val.
772 The functor signature is:
774 void func( value_type& val );
776 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
777 \p val no any other changes could be made on this set's item by concurrent threads.
778 The user-defined functor is called only if the inserting is success.
780 RCU \p synchronize method can be called. RCU should not be locked.
782 template <typename Func>
783 bool insert( value_type& val, Func f )
785 check_deadlock_policy::check();
791 node_type * pNode = node_traits::to_node_ptr( val );
792 scoped_node_ptr scp( pNode );
793 unsigned int nHeight = pNode->height();
794 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
795 bool bTowerMade = false;
801 bool bFound = find_position( val, pos, key_comparator(), true );
803 // scoped_node_ptr deletes the node tower if we create it
807 m_Stat.onInsertFailed();
814 nHeight = pNode->height();
819 if ( !insert_at_position( val, pNode, pos, f )) {
820 m_Stat.onInsertRetry();
824 increase_height( nHeight );
826 m_Stat.onAddNode( nHeight );
827 m_Stat.onInsertSuccess();
839 The operation performs inserting or changing data with lock-free manner.
841 If the item \p val is not found in the set, then \p val is inserted into the set
842 iff \p bInsert is \p true.
843 Otherwise, the functor \p func is called with item found.
844 The functor signature is:
846 void func( bool bNew, value_type& item, value_type& val );
849 - \p bNew - \p true if the item has been inserted, \p false otherwise
850 - \p item - item of the set
851 - \p val - argument \p val passed into the \p %update() function
852 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
853 refer to the same thing.
855 The functor can change non-key fields of the \p item; however, \p func must guarantee
856 that during changing no any other modifications could be made on this item by concurrent threads.
858 RCU \p synchronize method can be called. RCU should not be locked.
860 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
861 i.e. the node has been inserted or updated,
862 \p second is \p true if new item has been added or \p false if the item with \p key
865 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
867 template <typename Func>
868 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
870 check_deadlock_policy::check();
873 std::pair<bool, bool> bRet( true, false );
876 node_type * pNode = node_traits::to_node_ptr( val );
877 scoped_node_ptr scp( pNode );
878 unsigned int nHeight = pNode->height();
879 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
880 bool bTowerMade = false;
885 bool bFound = find_position( val, pos, key_comparator(), true );
887 // scoped_node_ptr deletes the node tower if we create it before
891 func( false, *node_traits::to_value_ptr(pos.pCur), val );
892 m_Stat.onUpdateExist();
904 nHeight = pNode->height();
909 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
910 m_Stat.onInsertRetry();
914 increase_height( nHeight );
917 m_Stat.onAddNode( nHeight );
918 m_Stat.onUpdateNew();
927 template <typename Func>
928 CDS_DEPRECATED("ensure() is deprecated, use update()")
929 std::pair<bool, bool> ensure( value_type& val, Func func )
931 return update( val, func, true );
935 /// Unlinks the item \p val from the set
937 The function searches the item \p val in the set and unlink it from the set
938 if it is found and is equal to \p val.
940 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
941 and deletes the item found. \p %unlink() searches an item by key and deletes it
942 only if \p val is an item of that set, i.e. the pointer to item found
943 is equal to <tt> &val </tt>.
945 RCU \p synchronize method can be called. RCU should not be locked.
947 The \ref disposer specified in \p Traits class template parameter is called
948 by garbage collector \p GC asynchronously.
950 The function returns \p true if success and \p false otherwise.
952 bool unlink( value_type& val )
954 check_deadlock_policy::check();
962 if ( !find_position( val, pos, key_comparator(), false )) {
963 m_Stat.onUnlinkFailed();
967 node_type * pDel = pos.pCur;
968 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
970 unsigned int nHeight = pDel->height();
972 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
974 m_Stat.onRemoveNode( nHeight );
975 m_Stat.onUnlinkSuccess();
979 m_Stat.onUnlinkFailed();
988 /// Extracts the item from the set with specified \p key
989 /** \anchor cds_intrusive_SkipListSet_rcu_extract
990 The function searches an item with key equal to \p key in the set,
991 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
992 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
994 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
996 RCU \p synchronize method can be called. RCU should NOT be locked.
997 The function does not call the disposer for the item found.
998 The disposer will be implicitly invoked when the returned object is destroyed or when
999 its \p release() member function is called.
1002 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1006 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1011 // Dispose returned item.
1016 template <typename Q>
1017 exempt_ptr extract( Q const& key )
1019 return exempt_ptr( do_extract( key ));
1022 /// Extracts the item from the set with comparing functor \p pred
1024 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1025 \p Less has the interface like \p std::less.
1026 \p pred must imply the same element order as the comparator used for building the set.
1028 template <typename Q, typename Less>
1029 exempt_ptr extract_with( Q const& key, Less pred )
1031 return exempt_ptr( do_extract_with( key, pred ));
1034 /// Extracts an item with minimal key from the list
1036 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1037 If the skip-list is empty the function returns an empty \p exempt_ptr.
1039 RCU \p synchronize method can be called. RCU should NOT be locked.
1040 The function does not call the disposer for the item found.
1041 The disposer will be implicitly invoked when the returned object is destroyed or when
1042 its \p release() member function is manually called.
1045 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1049 typename skip_list::exempt_ptr ep(theList.extract_min());
1054 // Dispose returned item.
1059 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1060 It means that the function gets leftmost item and tries to unlink it.
1061 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1062 So, the function returns the item with minimum key at the moment of list traversing.
1064 exempt_ptr extract_min()
1066 return exempt_ptr( do_extract_min());
1069 /// Extracts an item with maximal key from the list
1071 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1072 If the skip-list is empty the function returns an empty \p exempt_ptr.
1074 RCU \p synchronize method can be called. RCU should NOT be locked.
1075 The function does not call the disposer for the item found.
1076 The disposer will be implicitly invoked when the returned object is destroyed or when
1077 its \p release() member function is manually called.
1080 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1084 typename skip_list::exempt_ptr ep( theList.extract_max());
1088 // Dispose returned item.
1093 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1094 It means that the function gets rightmost item and tries to unlink it.
1095 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1096 So, the function returns the item with maximum key at the moment of list traversing.
1098 exempt_ptr extract_max()
1100 return exempt_ptr( do_extract_max());
1103 /// Deletes the item from the set
1104 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1105 The function searches an item with key equal to \p key in the set,
1106 unlinks it from the set, and returns \p true.
1107 If the item with key equal to \p key is not found the function return \p false.
1109 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1111 RCU \p synchronize method can be called. RCU should not be locked.
1113 template <typename Q>
1114 bool erase( const Q& key )
1116 return do_erase( key, key_comparator(), [](value_type const&) {} );
1119 /// Delete the item from the set with comparing functor \p pred
1121 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1122 but \p pred predicate is used for key comparing.
1123 \p Less has the interface like \p std::less.
1124 \p pred must imply the same element order as the comparator used for building the set.
1126 template <typename Q, typename Less>
1127 bool erase_with( const Q& key, Less pred )
1130 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1133 /// Deletes the item from the set
1134 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1135 The function searches an item with key equal to \p key in the set,
1136 call \p f functor with item found, unlinks it from the set, and returns \p true.
1137 The \ref disposer specified in \p Traits class template parameter is called
1138 by garbage collector \p GC asynchronously.
1140 The \p Func interface is
1143 void operator()( value_type const& item );
1146 If the item with key equal to \p key is not found the function return \p false.
1148 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1150 RCU \p synchronize method can be called. RCU should not be locked.
1152 template <typename Q, typename Func>
1153 bool erase( Q const& key, Func f )
1155 return do_erase( key, key_comparator(), f );
1158 /// Delete the item from the set with comparing functor \p pred
1160 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1161 but \p pred predicate is used for key comparing.
1162 \p Less has the interface like \p std::less.
1163 \p pred must imply the same element order as the comparator used for building the set.
1165 template <typename Q, typename Less, typename Func>
1166 bool erase_with( Q const& key, Less pred, Func f )
1169 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1173 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1174 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1175 The interface of \p Func functor is:
1178 void operator()( value_type& item, Q& key );
1181 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1183 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1184 that \p item cannot be disposed during functor is executing.
1185 The functor does not serialize simultaneous access to the set \p item. If such access is
1186 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1188 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1189 can modify both arguments.
1191 The function applies RCU lock internally.
1193 The function returns \p true if \p key is found, \p false otherwise.
1195 template <typename Q, typename Func>
1196 bool find( Q& key, Func f )
1198 return do_find_with( key, key_comparator(), f );
1201 template <typename Q, typename Func>
1202 bool find( Q const& key, Func f )
1204 return do_find_with( key, key_comparator(), f );
1208 /// Finds the key \p key with comparing functor \p pred
1210 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1211 but \p cmp is used for key comparison.
1212 \p Less functor has the interface like \p std::less.
1213 \p cmp must imply the same element order as the comparator used for building the set.
1215 template <typename Q, typename Less, typename Func>
1216 bool find_with( Q& key, Less pred, Func f )
1219 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1222 template <typename Q, typename Less, typename Func>
1223 bool find_with( Q const& key, Less pred, Func f )
1226 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1230 /// Checks whether the set contains \p key
1232 The function searches the item with key equal to \p key
1233 and returns \p true if it is found, and \p false otherwise.
1235 The function applies RCU lock internally.
1237 template <typename Q>
1238 bool contains( Q const& key )
1240 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1243 template <typename Q>
1244 CDS_DEPRECATED("deprecated, use contains()")
1245 bool find( Q const& key )
1247 return contains( key );
1251 /// Checks whether the set contains \p key using \p pred predicate for searching
1253 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1254 \p Less functor has the interface like \p std::less.
1255 \p Less must imply the same element order as the comparator used for building the set.
1257 template <typename Q, typename Less>
1258 bool contains( Q const& key, Less pred )
1261 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1264 template <typename Q, typename Less>
1265 CDS_DEPRECATED("deprecated, use contains()")
1266 bool find_with( Q const& key, Less pred )
1268 return contains( key, pred );
1272 /// Finds \p key and return the item found
1273 /** \anchor cds_intrusive_SkipListSet_rcu_get
1274 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1275 If \p key is not found it returns empty \p raw_ptr.
1277 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1279 RCU should be locked before call of this function.
1280 Returned item is valid only while RCU is locked:
1282 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1285 typename skip_list::raw_ptr pVal;
1288 skip_list::rcu_lock lock;
1290 pVal = theList.get( 5 );
1296 // You can manually release pVal after RCU-locked section
1300 template <typename Q>
1301 raw_ptr get( Q const& key )
1303 assert( gc::is_locked());
1306 value_type * pFound;
1307 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1308 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1309 return raw_ptr( raw_ptr_disposer( pos ));
1312 /// Finds \p key and return the item found
1314 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1315 but \p pred is used for comparing the keys.
1317 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1319 \p pred must imply the same element order as the comparator used for building the set.
1321 template <typename Q, typename Less>
1322 raw_ptr get_with( Q const& key, Less pred )
1325 assert( gc::is_locked());
1327 value_type * pFound = nullptr;
1329 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1330 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1332 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1334 return raw_ptr( raw_ptr_disposer( pos ));
1337 /// Returns item count in the set
1339 The value returned depends on item counter type provided by \p Traits template parameter.
1340 For \p atomicity::empty_item_counter the function always returns 0.
1341 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1342 member function for this purpose.
1346 return m_ItemCounter;
1349 /// Checks if the set is empty
1352 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1355 /// Clears the set (not atomic)
1357 The function unlink all items from the set.
1358 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1362 assert( set.empty());
1364 the assertion could be raised.
1366 For each item the \p disposer will be called automatically after unlinking.
1371 while ( (ep = extract_min()));
1374 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1375 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
// c_nMaxHeight is a compile-time constant taken from the Traits type list.
1377 return c_nMaxHeight;
1380 /// Returns const reference to internal statistics
1381 stat const& statistics() const
1389 bool is_extracted( marked_node_ptr const p ) const
1391 return ( p.bits() & 2 ) != 0;
// Helps a concurrent deleter: physically unlinks the logically-deleted node pCur
// from level nLevel by CASing pPred->next(nLevel) from pCur to pSucc.
// NOTE(review): lines of the original body are missing from this excerpt;
// comments below describe only the visible statements.
1394 void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
1396 marked_node_ptr p( pCur.ptr());
// Unlink only while pCur is still linked at this level and pPred still points at it unmarked.
1398 if ( pCur->is_upper_level( nLevel )
1399 && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
1400 memory_model::memory_order_release, atomics::memory_order_relaxed ))
// level_unlinked() presumably reports when the last level of pCur is unlinked — confirm against node_type.
1402 if ( pCur->level_unlinked()) {
// Extracted nodes (bit 2 set) are owned by the extracting thread; only erased nodes are disposed here.
1403 if ( !is_extracted( pSucc )) {
1404 // We cannot free the node at this moment because RCU is locked
1405 // Link deleted nodes to a chain to free later
1406 pos.dispose( pCur.ptr());
1407 m_Stat.onEraseWhileFind();
1410 m_Stat.onExtractWhileFind();
// Searches for val from the top level down, recording per-level predecessor/successor
// pairs into pos. Helps to physically remove logically-deleted nodes met on the way.
// Returns true iff an item equal to val was found (pos.pCur points to it).
// NOTE(review): this excerpt is missing interior lines (retry labels, braces);
// comments annotate only the visible statements.
1415 template <typename Q, typename Compare >
1416 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
1418 assert( gc::is_locked());
1421 marked_node_ptr pSucc;
1422 marked_node_ptr pCur;
// Start each (re)try from the list head.
1426 pPred = m_Head.head();
1428 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1431 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1433 // pCur.bits() means that pPred is logically deleted
1437 if ( pCur.ptr() == nullptr ) {
1438 // end of the list at level nLevel - goto next level
1442 // pSucc contains deletion mark for pCur
1443 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Consistency check: pPred's successor changed under us — restart.
1445 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
1448 if ( pSucc.bits()) {
1449 // pCur is marked, i.e. logically deleted.
1450 help_remove( nLevel, pPred, pCur, pSucc, pos );
1454 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
// With bStopIfFound the search may stop at the first equal item.
1457 else if ( nCmp == 0 && bStopIfFound )
// Record the insert/remove position at this level.
1465 pos.pPrev[nLevel] = pPred;
1466 pos.pSucc[nLevel] = pCur.ptr();
1473 pos.pCur = pCur.ptr();
1474 return pCur.ptr() && nCmp == 0;
// Locates the leftmost (minimum) item, filling pos with per-level predecessors.
// Returns true iff the set is non-empty (pos.pCur is the minimum node).
// NOTE(review): interior lines are missing from this excerpt.
1477 bool find_min_position( position& pos )
1479 assert( gc::is_locked());
1482 marked_node_ptr pSucc;
1483 marked_node_ptr pCur;
1486 pPred = m_Head.head();
1488 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1490 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1491 // pCur.bits() means that pPred is logically deleted
1492 // head cannot be deleted
1493 assert( pCur.bits() == 0 );
1497 // pSucc contains deletion mark for pCur
1498 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Consistency check: head's successor changed under us — restart.
1500 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
1503 if ( pSucc.bits()) {
1504 // pCur is marked, i.e. logically deleted.
1505 help_remove( nLevel, pPred, pCur, pSucc, pos );
1511 pos.pPrev[nLevel] = pPred;
1512 pos.pSucc[nLevel] = pCur.ptr();
// Non-null bottom-level successor of head == minimum item exists.
1514 return ( pos.pCur = pCur.ptr()) != nullptr;
// Locates the rightmost (maximum) item, filling pos with per-level predecessors.
// Returns true iff the set is non-empty (pos.pCur is the maximum node).
// NOTE(review): interior lines are missing from this excerpt.
1517 bool find_max_position( position& pos )
1519 assert( gc::is_locked());
1522 marked_node_ptr pSucc;
1523 marked_node_ptr pCur;
1526 pPred = m_Head.head();
1528 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1531 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1533 // pCur.bits() means that pPred is logically deleted
1537 if ( pCur.ptr() == nullptr ) {
1538 // end of the list at level nLevel - goto next level
1542 // pSucc contains deletion mark for pCur
1543 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Consistency check: pPred's successor changed under us — restart.
1545 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
1548 if ( pSucc.bits()) {
1549 // pCur is marked, i.e. logically deleted.
1550 help_remove( nLevel, pPred, pCur, pSucc, pos );
1562 pos.pPrev[nLevel] = pPred;
1563 pos.pSucc[nLevel] = pCur.ptr();
1566 return ( pos.pCur = pCur.ptr()) != nullptr;
// Re-runs the position search for val while pNode is being inserted, to refresh
// pos after a CAS failure. Detects the case where pNode itself has been marked
// for removal by a concurrent thread. NOTE(review): interior lines (return
// statements, retry labels) are missing from this excerpt.
1569 bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
1571 assert( gc::is_locked());
1574 marked_node_ptr pSucc;
1575 marked_node_ptr pCur;
1580 pPred = m_Head.head();
1582 for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
1585 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1587 // pCur.bits() means that pPred is logically deleted
1591 if ( pCur.ptr() == nullptr ) {
1592 // end of the list at level nLevel - goto next level
1596 // pSucc contains deletion mark for pCur
1597 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Consistency check: pPred's successor changed under us — restart.
1599 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
1602 if ( pSucc.bits()) {
1603 // pCur is marked, i.e. logically deleted.
1604 if ( pCur.ptr() == pNode ) {
1605 // The node is being removed while we are inserting it
1609 // try to help deleting pCur
1610 help_remove( nLevel, pPred, pCur, pSucc, pos );
1614 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
1623 pos.pPrev[nLevel] = pPred;
1624 pos.pSucc[nLevel] = pCur.ptr();
// Links pNode into the list at the position recorded in pos: level 0 first
// (the linearization point), then upper levels, renewing the position on CAS
// failure. NOTE(review): interior lines (returns, braces, f invocation) are
// missing from this excerpt; comments annotate visible statements only.
1630 template <typename Func>
1631 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
1633 assert( gc::is_locked());
1635 unsigned int const nHeight = pNode->height();
// Reset the node's tower links before (re)linking.
1636 pNode->clear_tower();
1638 // Insert at level 0
1640 marked_node_ptr p( pos.pSucc[0] );
1641 pNode->next( 0 ).store( p, memory_model::memory_order_relaxed );
// Level-0 CAS makes the node reachable; failure means the position is stale.
1642 if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
1648 // Insert at level 1..max
1649 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
1652 marked_node_ptr pSucc( pos.pSucc[nLevel] );
1655 // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now
1656 if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
1657 memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
1659 // pNode has been marked as removed while we are inserting it
1661 assert( p.bits() != 0 );
1663 // Here pNode is linked at least at level 0 so level_unlinked() cannot return true
1664 CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
1666 // pNode is linked up to nLevel - 1
1667 // Remove it via find_position()
1668 find_position( val, pos, key_comparator(), false );
1670 m_Stat.onLogicDeleteWhileInsert();
1675 // Link pNode into the list at nLevel
1676 if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ),
1677 memory_model::memory_order_release, atomics::memory_order_relaxed ))
1683 // Renew insert position
1684 m_Stat.onRenewInsertPosition();
1686 if ( !renew_insert_position( val, pNode, pos )) {
1687 // The node has been deleted while we are inserting it
1688 // Update current height for concurrent removing
1689 CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
1691 m_Stat.onRemoveWhileInsert();
1693 // help removing val
1694 find_position( val, pos, key_comparator(), false );
// Two-phase removal of pDel: (1) logical deletion — mark the next pointers of
// all levels top-down, level 0 last (the linearization point, where f is
// called); (2) physical deletion — unlink each level via CAS on the recorded
// predecessors, falling back to find_position() on contention.
// bExtract selects mask 3 (erase|extract) vs 1 (erase only).
// NOTE(review): interior lines (returns, retry loop, braces) are missing from
// this excerpt; comments annotate visible statements only.
1702 template <typename Func>
1703 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
1705 assert( pDel != nullptr );
1706 assert( gc::is_locked());
1708 marked_node_ptr pSucc;
// bit 1 = logically deleted; bit 2 additionally marks extraction.
1711 unsigned const nMask = bExtract ? 3u : 1u;
1713 // logical deletion (marking)
// Mark upper levels first; level 0 is marked last, below.
1714 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
1715 pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
1716 if ( pSucc.bits() == 0 ) {
1718 while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask,
1719 memory_model::memory_order_release, atomics::memory_order_acquire ))
1721 if ( pSucc.bits() == 0 ) {
1723 m_Stat.onMarkFailed();
// A different mask means another thread claimed the node with a different intent.
1725 else if ( pSucc.bits() != nMask )
1731 marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr());
// Marking level 0 is the linearization point of the removal; only the winner calls f.
1733 if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire ))
1735 f( *node_traits::to_value_ptr( pDel ));
1737 // physical deletion
1740 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
1742 pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
1743 if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
1744 memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
1746 pDel->level_unlinked();
// Fast unlink failed at some level: fall back to a full search that helps removal.
1751 if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
1752 assert( pDel != pos.pCur );
1754 find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
1757 m_Stat.onSlowExtract();
1759 m_Stat.onSlowErase();
1765 // Fast erasing success
1767 // We cannot free the node at this moment since RCU is locked
1768 // Link deleted nodes to a chain to free later
1769 pos.dispose( pDel );
1770 m_Stat.onFastErase();
1773 m_Stat.onFastExtract();
1776 else if ( p.bits()) {
1777 // Another thread is deleting pDel right now
1778 m_Stat.onEraseContention();
1782 m_Stat.onEraseRetry();
// Result of the wait-free fast-path search. NOTE(review): "finsd" looks like a
// typo for "find"; renaming would touch uses outside this excerpt, left as-is.
// The find_fastpath_abort enumerator (used below) is on a line missing from
// this excerpt.
1787 enum finsd_fastpath_result {
1788 find_fastpath_found,
1789 find_fastpath_not_found,
// Wait-free read-only search: descends levels without helping removal.
// Aborts to the slow path after a few encounters with nodes under deletion.
1792 template <typename Q, typename Compare, typename Func>
1793 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1796 marked_node_ptr pCur;
1797 marked_node_ptr pSucc;
// Default-constructed marked pointer: the end-of-level sentinel for the loop below.
1798 marked_node_ptr pNull;
1801 unsigned attempt = 0;
1804 pPred = m_Head.head();
// Start from the current cached height, not c_nMaxHeight.
1805 for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
1806 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
1808 while ( pCur != pNull ) {
1810 // pPred is being removed
1811 if ( ++attempt < 4 ) {
// Too much contention — let the slow path (with helping) take over.
1816 return find_fastpath_abort;
1820 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
// pCur < val: advance along the current level.
1823 pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
1825 else if ( nCmp == 0 ) {
1827 f( *node_traits::to_value_ptr( pCur.ptr()), val );
1828 return find_fastpath_found;
1830 else // pCur > val - go down
1836 return find_fastpath_not_found;
1839 template <typename Q, typename Compare, typename Func>
1840 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1842 if ( find_position( val, pos, cmp, true )) {
1843 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1845 f( *node_traits::to_value_ptr( pos.pCur ), val );
1852 template <typename Q, typename Compare, typename Func>
1853 bool do_find_with( Q& val, Compare cmp, Func f )
1856 return do_find_with( val, cmp, f, pos );
// Core find: tries the wait-free fast path first, falling back to the slow
// path (which helps removal) on abort. NOTE(review): interior lines (RCU lock
// scope, returns) are missing from this excerpt.
1859 template <typename Q, typename Compare, typename Func>
1860 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1867 switch ( find_fastpath( val, cmp, f )) {
1868 case find_fastpath_found:
1869 m_Stat.onFindFastSuccess();
1871 case find_fastpath_not_found:
1872 m_Stat.onFindFastFailed();
// Fast path aborted — do the full traversal.
1878 if ( find_slowpath( val, cmp, f, pos )) {
1879 m_Stat.onFindSlowSuccess();
1883 m_Stat.onFindSlowFailed();
// Common erase path: finds the item, then removes it via try_remove_at()
// (bExtract == false, so the node is chained for deferred disposal).
// NOTE(review): interior lines (position declaration, RCU lock scope, returns)
// are missing from this excerpt.
1890 template <typename Q, typename Compare, typename Func>
1891 bool do_erase( Q const& val, Compare cmp, Func f )
// Erase may call RCU synchronize — forbid calling under RCU lock.
1893 check_deadlock_policy::check();
1901 if ( !find_position( val, pos, cmp, false )) {
1902 m_Stat.onEraseFailed();
1906 node_type * pDel = pos.pCur;
1907 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
// Capture the height before removal for statistics.
1909 unsigned int nHeight = pDel->height();
1910 if ( try_remove_at( pDel, pos, f, false )) {
1912 m_Stat.onRemoveNode( nHeight );
1913 m_Stat.onEraseSuccess();
1917 m_Stat.onEraseFailed();
// Extracts (unlinks without disposing) the item equal to key; the caller takes
// ownership via the returned raw pointer. NOTE(review): interior lines (retry
// loop, pDel assignment) are missing from this excerpt.
1926 template <typename Q, typename Compare>
1927 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1929 // RCU should be locked!!!
1930 assert( gc::is_locked());
1934 if ( !find_position( key, pos, cmp, false )) {
1935 m_Stat.onExtractFailed();
1940 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1942 unsigned int const nHeight = pDel->height();
// bExtract == true: the node is NOT disposed; ownership passes to the caller.
1944 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
1946 m_Stat.onRemoveNode( nHeight );
1947 m_Stat.onExtractSuccess();
1950 m_Stat.onExtractFailed();
1955 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extract with the default comparator. NOTE(review): the RCU lock scope and
// position declaration are on lines missing from this excerpt; presumably the
// do_extract_key call runs inside an rcu_lock block — confirm against upstream.
1958 template <typename Q>
1959 value_type * do_extract( Q const& key )
// Extraction may call RCU synchronize later — forbid calling under RCU lock.
1961 check_deadlock_policy::check();
1962 value_type * pDel = nullptr;
1966 pDel = do_extract_key( key, key_comparator(), pos );
// Extract using a comparator synthesized from Less. NOTE(review): the RCU lock
// scope and position declaration are on lines missing from this excerpt.
1972 template <typename Q, typename Less>
1973 value_type * do_extract_with( Q const& key, Less pred )
1976 check_deadlock_policy::check();
1977 value_type * pDel = nullptr;
1981 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// Extracts the minimum item; caller takes ownership. NOTE(review): the RCU lock
// scope, retry loop and pDel assignment are on lines missing from this excerpt.
1987 value_type * do_extract_min()
// Must not be called under RCU lock (synchronize may be invoked on dispose).
1989 assert( !gc::is_locked());
1997 if ( !find_min_position( pos )) {
1998 m_Stat.onExtractMinFailed();
2003 unsigned int const nHeight = pDel->height();
// bExtract == true: unlink without disposing.
2005 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
2007 m_Stat.onRemoveNode( nHeight );
2008 m_Stat.onExtractMinSuccess();
2011 m_Stat.onExtractMinFailed();
2017 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extracts the maximum item; caller takes ownership. NOTE(review): the RCU lock
// scope, retry loop and pDel assignment are on lines missing from this excerpt.
2020 value_type * do_extract_max()
// Must not be called under RCU lock (synchronize may be invoked on dispose).
2022 assert( !gc::is_locked());
2030 if ( !find_max_position( pos )) {
2031 m_Stat.onExtractMaxFailed();
2036 unsigned int const nHeight = pDel->height();
// bExtract == true: unlink without disposing.
2038 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
2040 m_Stat.onRemoveNode( nHeight );
2041 m_Stat.onExtractMaxSuccess();
2044 m_Stat.onExtractMaxFailed();
2050 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
2053 void increase_height( unsigned int nHeight )
2055 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
2056 if ( nCur < nHeight )
2057 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
// NOTE(review): the enclosing function's signature is on a line missing from
// this excerpt — presumably the single-threaded destroy()/destructor helper
// that walks level 0 and disposes every node; confirm against upstream.
2062 node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
// Read the successor before disposing p, since dispose invalidates the node.
2064 node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
2065 dispose_node( node_traits::to_value_ptr( p ));
2073 }} // namespace cds::intrusive
2076 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H