3 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
4 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
8 #include <cds/intrusive/details/skip_list_base.h>
9 #include <cds/opt/compare.h>
10 #include <cds/urcu/details/check_deadlock.h>
11 #include <cds/details/binary_functor_wrapper.h>
12 #include <cds/urcu/exempt_ptr.h>
13 #include <cds/urcu/raw_ptr.h>
14 #include <cds/intrusive/details/raw_ptr_disposer.h>
16 namespace cds { namespace intrusive {
21 template <class RCU, typename Tag>
22 class node< cds::urcu::gc< RCU >, Tag >
25 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
26 typedef Tag tag ; ///< tag
29 // bit 0 - the item is logically deleted
30 // bit 1 - the item is extracted (only for level 0)
31 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
32 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
33 typedef atomic_marked_ptr tower_item_type;
36 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
38 node * m_pDelChain; ///< Deleted node chain (local for a thread)
40 bool volatile m_bLinked;
41 bool volatile m_bUnlinked;
44 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
45 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
48 /// Constructs a node of height 1 (a bottom-list node)
51 , m_pDelChain( nullptr )
54 , m_bUnlinked( false )
57 , m_arrNext( nullptr )
63 assert( !m_bLinked || m_bUnlinked );
67 /// Constructs a node of height \p nHeight
68 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
70 assert( nHeight > 0 );
71 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
72 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
75 m_arrNext = nextTower;
79 atomic_marked_ptr * release_tower()
81 atomic_marked_ptr * pTower = m_arrNext;
87 atomic_marked_ptr * get_tower() const
94 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
95 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
98 /// Access to element of next pointer array
99 atomic_marked_ptr& next( unsigned int nLevel )
101 assert( nLevel < height() );
102 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
104 # ifdef CDS_THREAD_SANITIZER_ENABLED
105 // TSan false positive: m_arrNext is read-only array
106 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
107 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
108 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
111 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
115 /// Access to element of next pointer array (const version)
116 atomic_marked_ptr const& next( unsigned int nLevel ) const
118 assert( nLevel < height() );
119 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
121 # ifdef CDS_THREAD_SANITIZER_ENABLED
122 // TSan false positive: m_arrNext is read-only array
123 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
124 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
125 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
128 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
132 /// Access to element of next pointer array (same as \ref next function)
133 atomic_marked_ptr& operator[]( unsigned int nLevel )
135 return next( nLevel );
138 /// Access to element of next pointer array (same as \ref next function)
139 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
141 return next( nLevel );
144 /// Height of the node
145 unsigned int height() const
150 /// Clears internal links
153 assert( m_arrNext == nullptr );
154 m_pNext.store( marked_ptr(), atomics::memory_order_release );
155 m_pDelChain = nullptr;
158 bool is_cleared() const
160 return m_pNext == atomic_marked_ptr()
161 && m_arrNext == nullptr
165 } // namespace skip_list
169 namespace skip_list { namespace details {
171 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
172 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
175 typedef cds::urcu::gc< RCU > gc;
176 typedef NodeTraits node_traits;
177 typedef BackOff back_off;
178 typedef typename node_traits::node_type node_type;
179 typedef typename node_traits::value_type value_type;
180 static bool const c_isConst = IsConst;
182 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
185 typedef typename node_type::marked_ptr marked_ptr;
186 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
193 // RCU should be locked before iterating!!!
194 assert( gc::is_locked() );
199 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
200 // Current node is marked as deleted. So, its next pointer can point to anything
201 // In this case we interrupt our iteration and return the end() iterator.
206 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
207 node_type * pp = p.ptr();
209 // p is marked as deleted. Spin waiting for physical removal
213 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
214 // p is marked as deleted. Spin waiting for physical removal
224 public: // for internal use only!!!
225 iterator( node_type& refHead )
228 // RCU should be locked before iterating!!!
229 assert( gc::is_locked() );
234 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
240 node_type * pp = p.ptr();
241 // Logically deleted node is marked from highest level
242 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
255 // RCU should be locked before iterating!!!
256 assert( gc::is_locked() );
259 iterator( iterator const& s)
260 : m_pNode( s.m_pNode )
262 // RCU should be locked before iterating!!!
263 assert( gc::is_locked() );
266 value_type * operator ->() const
268 assert( m_pNode != nullptr );
269 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
271 return node_traits::to_value_ptr( m_pNode );
274 value_ref operator *() const
276 assert( m_pNode != nullptr );
277 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
279 return *node_traits::to_value_ptr( m_pNode );
283 iterator& operator ++()
289 iterator& operator = (const iterator& src)
291 m_pNode = src.m_pNode;
295 template <typename Bkoff, bool C>
296 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
298 return m_pNode == i.m_pNode;
300 template <typename Bkoff, bool C>
301 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
303 return !( *this == i );
306 }} // namespace skip_list::details
309 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
310 /** @ingroup cds_intrusive_map
311 @anchor cds_intrusive_SkipListSet_rcu
313 The implementation of well-known probabilistic data structure called skip-list
314 invented by W.Pugh in his papers:
315 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
316 - [1990] W.Pugh A Skip List Cookbook
318 A skip-list is a probabilistic data structure that provides expected logarithmic
319 time search without the need of rebalance. The skip-list is a collection of sorted
320 linked lists. Nodes are ordered by key. Each node is linked into a subset of the lists.
321 Each list has a level, ranging from 0 to 32. The bottom-level list contains
322 all the nodes, and each higher-level list is a sublist of the lower-level lists.
323 Each node is created with a random top level (with a random height), and belongs
324 to all lists up to that level. The probability that a node has the height 1 is 1/2.
325 The probability that a node has the height N is 1/2 ** N (more precisely,
326 the distribution depends on a random generator provided, but our generators
329 The lock-free variant of skip-list is implemented according to book
330 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
331 chapter 14.4 "A Lock-Free Concurrent Skiplist".
333 <b>Template arguments</b>:
334 - \p RCU - one of \ref cds_urcu_gc "RCU type"
335 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
336 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
337 - \p Traits - set traits, default is \p skip_list::traits
338 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
339 instead of \p Traits template argument.
341 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
342 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
346 The class supports a forward iterator (\ref iterator and \ref const_iterator).
347 The iteration is ordered.
349 You may iterate over skip-list set items only under RCU lock.
350 Only in this case the iterator is thread-safe since
351 while RCU is locked any set's item cannot be reclaimed.
353 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
354 (i.e. inserting, erasing and so on) is not possible.
356 @warning The iterator object cannot be passed between threads.
358 Example how to use skip-list set iterators:
360 // First, you should include the header for RCU type you have chosen
361 #include <cds/urcu/general_buffered.h>
362 #include <cds/intrusive/skip_list_rcu.h>
364 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
370 // Traits for your skip-list.
371 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
372 struct my_traits: public cds::intrusive::skip_list::traits
376 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
378 my_skiplist_set theSet;
384 // Apply RCU locking manually
385 typename rcu_type::scoped_lock sl;
387 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
391 // rcu_type::scoped_lock destructor releases RCU lock implicitly
395 The iterator class supports the following minimalistic interface:
402 iterator( iterator const& s);
404 value_type * operator ->() const;
405 value_type& operator *() const;
408 iterator& operator ++();
411 iterator& operator = (const iterator& src);
413 bool operator ==(iterator const& i ) const;
414 bool operator !=(iterator const& i ) const;
417 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
421 You should incorporate skip_list::node into your struct \p T and provide
422 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
423 define a struct based on \p skip_list::traits.
425 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
427 // First, you should include the header for RCU type you have chosen
428 #include <cds/urcu/general_buffered.h>
430 // Include RCU skip-list specialization
431 #include <cds/intrusive/skip_list_rcu.h>
434 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
436 // Data stored in skip list
437 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
446 // my_data compare functor
448 int operator()( const my_data& d1, const my_data& d2 )
450 return d1.strKey.compare( d2.strKey );
453 int operator()( const my_data& d, const std::string& s )
455 return d.strKey.compare(s);
458 int operator()( const std::string& s, const my_data& d )
460 return s.compare( d.strKey );
465 struct my_traits: public cds::intrusive::skip_list::traits
467 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
468 typedef my_data_cmp compare;
471 // Declare skip-list set type
472 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
475 Equivalent option-based code:
477 #include <cds/urcu/general_buffered.h>
478 #include <cds/intrusive/skip_list_rcu.h>
480 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
489 // Declare option-based skip-list set
490 typedef cds::intrusive::SkipListSet< rcu_type
492 , typename cds::intrusive::skip_list::make_traits<
493 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
494 ,cds::intrusive::opt::compare< my_data_cmp >
503 #ifdef CDS_DOXYGEN_INVOKED
504 ,typename Traits = skip_list::traits
509 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
512 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
513 typedef T value_type; ///< type of value stored in the skip-list
514 typedef Traits traits; ///< Traits template parameter
516 typedef typename traits::hook hook; ///< hook type
517 typedef typename hook::node_type node_type; ///< node type
519 # ifdef CDS_DOXYGEN_INVOKED
520 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
522 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
525 typedef typename traits::disposer disposer; ///< disposer
526 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
528 typedef typename traits::item_counter item_counter; ///< Item counting policy used
529 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
530 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
531 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
532 typedef typename traits::back_off back_off; ///< Back-off strategy
533 typedef typename traits::stat stat; ///< internal statistics type
534 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
535 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
536 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
539 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
541 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
542 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
544 static unsigned int const c_nMaxHeight = std::conditional<
545 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
546 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
547 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
551 static unsigned int const c_nMinHeight = 5;
552 typedef cds::intrusive::skip_list::implementation_tag implementation_tag;
556 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
557 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
561 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
563 typedef typename std::conditional<
564 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
565 ,intrusive_node_builder
566 ,typename traits::internal_node_builder
567 >::type node_builder;
569 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
571 static void dispose_node( value_type * pVal )
575 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
581 void operator()( value_type * pVal )
583 dispose_node( pVal );
587 static void dispose_chain( node_type * pChain )
590 assert( !gc::is_locked() );
592 auto f = [&pChain]() -> cds::urcu::retired_ptr {
593 node_type * p = pChain;
595 pChain = p->m_pDelChain;
596 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
598 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
600 gc::batch_retire(std::ref(f));
605 node_type * pPrev[ c_nMaxHeight ];
606 node_type * pSucc[ c_nMaxHeight ];
607 node_type * pNext[ c_nMaxHeight ];
610 node_type * pDelChain;
613 : pDelChain( nullptr )
618 dispose_chain( pDelChain );
621 void dispose( node_type * p )
623 assert( p != nullptr );
624 assert( p->m_pDelChain == nullptr );
626 p->m_pDelChain = pDelChain;
631 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
635 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
637 item_counter m_ItemCounter; ///< item counter
638 random_level_generator m_RandomLevelGen; ///< random level generator instance
639 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
640 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
641 mutable stat m_Stat; ///< internal statistics
645 unsigned int random_level()
647 // Random generator produces a number from range [0..31]
648 // We need a number from range [1..32]
649 return m_RandomLevelGen() + 1;
652 template <typename Q>
653 node_type * build_node( Q v )
655 return node_builder::make_tower( v, m_RandomLevelGen );
660 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
664 struct chain_disposer {
665 void operator()( node_type * pChain ) const
667 dispose_chain( pChain );
670 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
674 /// Result of \p get(), \p get_with() functions - pointer to the node found
675 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
680 bool is_extracted( marked_node_ptr const p ) const
682 return (p.bits() & 2) != 0;
685 template <typename Q, typename Compare >
686 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
688 assert( gc::is_locked() );
691 marked_node_ptr pSucc;
692 marked_node_ptr pCur;
696 pPred = m_Head.head();
698 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
701 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
703 // pCur.bits() means that pPred is logically deleted
707 if ( pCur.ptr() == nullptr ) {
708 // end of the list at level nLevel - goto next level
712 // pSucc contains deletion mark for pCur
713 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
715 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
718 if ( pSucc.bits() ) {
719 // pCur is marked, i.e. logically deleted.
720 marked_node_ptr p( pCur.ptr() );
721 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
722 memory_model::memory_order_release, atomics::memory_order_relaxed ))
726 pCur->m_bUnlinked = true;
729 if ( !is_extracted( pSucc )) {
730 // We cannot free the node at this moment since RCU is locked
731 // Link deleted nodes to a chain to free later
732 pos.dispose( pCur.ptr() );
733 m_Stat.onEraseWhileFind();
736 m_Stat.onExtractWhileFind();
743 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
746 else if ( nCmp == 0 && bStopIfFound )
754 pos.pPrev[ nLevel ] = pPred;
755 pos.pSucc[ nLevel ] = pCur.ptr();
762 pos.pCur = pCur.ptr();
763 return pCur.ptr() && nCmp == 0;
766 bool find_min_position( position& pos )
768 assert( gc::is_locked() );
771 marked_node_ptr pSucc;
772 marked_node_ptr pCur;
775 pPred = m_Head.head();
777 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
779 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
780 // pCur.bits() means that pPred is logically deleted
781 // head cannot be deleted
782 assert( pCur.bits() == 0 );
786 // pSucc contains deletion mark for pCur
787 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
789 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
792 if ( pSucc.bits() ) {
793 // pCur is marked, i.e. logically deleted.
794 marked_node_ptr p( pCur.ptr() );
795 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
796 memory_model::memory_order_release, atomics::memory_order_relaxed ))
800 pCur->m_bUnlinked = true;
803 if ( !is_extracted( pSucc )) {
804 // We cannot free the node at this moment since RCU is locked
805 // Link deleted nodes to a chain to free later
806 pos.dispose( pCur.ptr() );
807 m_Stat.onEraseWhileFind();
810 m_Stat.onExtractWhileFind();
819 pos.pPrev[ nLevel ] = pPred;
820 pos.pSucc[ nLevel ] = pCur.ptr();
822 return (pos.pCur = pCur.ptr()) != nullptr;
825 bool find_max_position( position& pos )
827 assert( gc::is_locked() );
830 marked_node_ptr pSucc;
831 marked_node_ptr pCur;
834 pPred = m_Head.head();
836 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
839 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
841 // pCur.bits() means that pPred is logically deleted
845 if ( pCur.ptr() == nullptr ) {
846 // end of the list at level nLevel - goto next level
850 // pSucc contains deletion mark for pCur
851 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
853 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
856 if ( pSucc.bits() ) {
857 // pCur is marked, i.e. logically deleted.
858 marked_node_ptr p( pCur.ptr() );
859 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
860 memory_model::memory_order_release, atomics::memory_order_relaxed ))
864 pCur->m_bUnlinked = true;
867 if ( !is_extracted( pSucc )) {
868 // We cannot free the node at this moment since RCU is locked
869 // Link deleted nodes to a chain to free later
870 pos.dispose( pCur.ptr() );
871 m_Stat.onEraseWhileFind();
874 m_Stat.onExtractWhileFind();
889 pos.pPrev[ nLevel ] = pPred;
890 pos.pSucc[ nLevel ] = pCur.ptr();
893 return (pos.pCur = pCur.ptr()) != nullptr;
896 template <typename Func>
897 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
899 assert( gc::is_locked() );
901 unsigned int nHeight = pNode->height();
902 pNode->clear_tower();
905 marked_node_ptr p( pos.pSucc[0] );
906 pNode->next( 0 ).store( p, memory_model::memory_order_release );
907 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
911 pNode->m_bLinked = true;
916 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
919 marked_node_ptr q( pos.pSucc[ nLevel ]);
920 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
921 // pNode has been marked as removed while we are inserting it
924 m_Stat.onLogicDeleteWhileInsert();
928 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
931 // Renew insert position
932 m_Stat.onRenewInsertPosition();
933 if ( !find_position( val, pos, key_comparator(), false )) {
934 // The node has been deleted while we are inserting it
935 m_Stat.onNotFoundWhileInsert();
943 template <typename Func>
944 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
946 assert( pDel != nullptr );
947 assert( gc::is_locked() );
949 marked_node_ptr pSucc;
951 // logical deletion (marking)
952 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
953 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
956 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
963 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
968 int const nMask = bExtract ? 3 : 1;
969 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
971 f( *node_traits::to_value_ptr( pDel ));
976 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
977 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
978 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
979 memory_model::memory_order_release, atomics::memory_order_relaxed) )
982 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
984 m_Stat.onSlowExtract();
986 m_Stat.onSlowErase();
988 assert( pDel->m_bUnlinked );
995 pDel->m_bUnlinked = true;
998 // We cannot free the node at this moment since RCU is locked
999 // Link deleted nodes to a chain to free later
1000 pos.dispose( pDel );
1001 m_Stat.onFastErase();
1004 m_Stat.onFastExtract();
1007 m_Stat.onEraseRetry();
1011 enum finsd_fastpath_result {
1012 find_fastpath_found,
1013 find_fastpath_not_found,
1016 template <typename Q, typename Compare, typename Func>
1017 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1020 marked_node_ptr pCur;
1021 marked_node_ptr pSucc;
1022 marked_node_ptr pNull;
1026 pPred = m_Head.head();
1027 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1028 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1029 if ( pCur == pNull )
1032 while ( pCur != pNull ) {
1033 if ( pCur.bits() ) {
1034 // Wait until pCur is removed
1035 unsigned int nAttempt = 0;
1036 while ( pCur.bits() && nAttempt++ < 16 ) {
1038 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1042 if ( pCur.bits() ) {
1043 // Maybe, we are on deleted node sequence
1044 // Abort searching, try slow-path
1045 return find_fastpath_abort;
1050 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1053 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1055 else if ( nCmp == 0 ) {
1057 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1058 return find_fastpath_found;
1060 else // pCur > val - go down
1066 return find_fastpath_not_found;
1069 template <typename Q, typename Compare, typename Func>
1070 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1072 if ( find_position( val, pos, cmp, true )) {
1073 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1075 f( *node_traits::to_value_ptr( pos.pCur ), val );
1082 template <typename Q, typename Compare, typename Func>
1083 bool do_find_with( Q& val, Compare cmp, Func f )
1086 return do_find_with( val, cmp, f, pos );
1089 template <typename Q, typename Compare, typename Func>
1090 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1097 switch ( find_fastpath( val, cmp, f )) {
1098 case find_fastpath_found:
1099 m_Stat.onFindFastSuccess();
1101 case find_fastpath_not_found:
1102 m_Stat.onFindFastFailed();
1108 if ( find_slowpath( val, cmp, f, pos )) {
1109 m_Stat.onFindSlowSuccess();
1113 m_Stat.onFindSlowFailed();
1120 template <typename Q, typename Compare, typename Func>
1121 bool do_erase( Q const& val, Compare cmp, Func f )
1123 check_deadlock_policy::check();
1131 if ( !find_position( val, pos, cmp, false ) ) {
1132 m_Stat.onEraseFailed();
1136 node_type * pDel = pos.pCur;
1137 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1139 unsigned int nHeight = pDel->height();
1140 if ( try_remove_at( pDel, pos, f, false )) {
1142 m_Stat.onRemoveNode( nHeight );
1143 m_Stat.onEraseSuccess();
1147 m_Stat.onEraseFailed();
1156 template <typename Q, typename Compare>
1157 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1159 // RCU should be locked!!!
1160 assert( gc::is_locked() );
1164 if ( !find_position( key, pos, cmp, false ) ) {
1165 m_Stat.onExtractFailed();
1170 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1172 unsigned int const nHeight = pDel->height();
1174 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1176 m_Stat.onRemoveNode( nHeight );
1177 m_Stat.onExtractSuccess();
1180 m_Stat.onExtractFailed();
1185 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1188 template <typename Q>
1189 value_type * do_extract( Q const& key )
1191 check_deadlock_policy::check();
1192 value_type * pDel = nullptr;
1196 pDel = do_extract_key( key, key_comparator(), pos );
1202 template <typename Q, typename Less>
1203 value_type * do_extract_with( Q const& key, Less pred )
1206 check_deadlock_policy::check();
1207 value_type * pDel = nullptr;
1211 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
1217 value_type * do_extract_min()
1219 assert( !gc::is_locked() );
1227 if ( !find_min_position( pos ) ) {
1228 m_Stat.onExtractMinFailed();
1233 unsigned int const nHeight = pDel->height();
1235 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1237 m_Stat.onRemoveNode( nHeight );
1238 m_Stat.onExtractMinSuccess();
1241 m_Stat.onExtractMinFailed();
1247 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1250 value_type * do_extract_max()
1252 assert( !gc::is_locked() );
1260 if ( !find_max_position( pos ) ) {
1261 m_Stat.onExtractMaxFailed();
1266 unsigned int const nHeight = pDel->height();
1268 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1270 m_Stat.onRemoveNode( nHeight );
1271 m_Stat.onExtractMaxSuccess();
1274 m_Stat.onExtractMaxFailed();
1280 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1283 void increase_height( unsigned int nHeight )
1285 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1286 if ( nCur < nHeight )
1287 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
1292 /// Default constructor
1294 : m_Head( c_nMaxHeight )
1295 , m_nHeight( c_nMinHeight )
1296 , m_pDeferredDelChain( nullptr )
1298 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1300 // Barrier for head node
1301 atomics::atomic_thread_fence( memory_model::memory_order_release );
1304 /// Clears and destructs the skip-list
1312 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1314 /// Const iterator type
1315 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1317 /// Returns a forward iterator addressing the first element in a set
1320 return iterator( *m_Head.head() );
1323 /// Returns a forward const iterator addressing the first element in a set
1324 const_iterator begin() const
1326 return const_iterator( *m_Head.head() );
1329 /// Returns a forward const iterator addressing the first element in a set
1330 const_iterator cbegin() const
1332 return const_iterator( *m_Head.head() );
1335 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
1341 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1342 const_iterator end() const
1344 return const_iterator();
1347 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1348 const_iterator cend() const
1350 return const_iterator();
1354 /// Inserts new node
1356 The function inserts \p val in the set if it does not contain
1357 an item with key equal to \p val.
1359 The function applies RCU lock internally.
1361 Returns \p true if \p val is placed into the set, \p false otherwise.
1363 bool insert( value_type& val )
1365 return insert( val, []( value_type& ) {} );
1368 /// Inserts new node
1370 This function is intended for derived non-intrusive containers.
1372 The function allows to split creation of a new item into two parts:
1373 - create item with key only
1374 - insert new item into the set
1375 - if insertion succeeds, calls \p f functor to initialize value-field of \p val.
1377 The functor signature is:
1379 void func( value_type& val );
1381 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1382 \p val no any other changes could be made on this set's item by concurrent threads.
1383 The user-defined functor is called only if the insertion succeeds.
1385 RCU \p synchronize method can be called. RCU should not be locked.
1387 template <typename Func>
1388 bool insert( value_type& val, Func f )
1390 check_deadlock_policy::check();
// Pin the intrusive node and look at its pre-built tower (if any).
1396 node_type * pNode = node_traits::to_node_ptr( val );
1397 scoped_node_ptr scp( pNode );
1398 unsigned int nHeight = pNode->height();
1399 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1400 bool bTowerMade = false;
1406 bool bFound = find_position( val, pos, key_comparator(), true );
1408 // scoped_node_ptr deletes the node tower if we create it
1412 m_Stat.onInsertFailed();
1418 build_node( pNode );
1419 nHeight = pNode->height();
// Linking may fail under contention; the caller loop retries (retry path counted below).
1424 if ( !insert_at_position( val, pNode, pos, f )) {
1425 m_Stat.onInsertRetry();
1429 increase_height( nHeight );
1431 m_Stat.onAddNode( nHeight );
1432 m_Stat.onInsertSuccess();
1442 /// Updates the node
1444 The operation performs inserting or changing data in a lock-free manner.
1446 If the item \p val is not found in the set, then \p val is inserted into the set
1447 iff \p bInsert is \p true.
1448 Otherwise, the functor \p func is called with item found.
1449 The functor signature is:
1451 void func( bool bNew, value_type& item, value_type& val );
1454 - \p bNew - \p true if the item has been inserted, \p false otherwise
1455 - \p item - item of the set
1456 - \p val - argument \p val passed into the \p %update() function
1457 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1458 refer to the same thing.
1460 The functor can change non-key fields of the \p item; however, \p func must guarantee
1461 that during changing no any other modifications could be made on this item by concurrent threads.
1463 RCU \p synchronize method can be called. RCU should not be locked.
1465 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
1466 i.e. the node has been inserted or updated,
1467 \p second is \p true if new item has been added or \p false if the item with \p key
1470 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1472 template <typename Func>
1473 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
1475 check_deadlock_policy::check();
1478 std::pair<bool, bool> bRet( true, false );
1481 node_type * pNode = node_traits::to_node_ptr( val );
1482 scoped_node_ptr scp( pNode );
1483 unsigned int nHeight = pNode->height();
1484 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1485 bool bTowerMade = false;
1490 bool bFound = find_position( val, pos, key_comparator(), true );
1492 // scoped_node_ptr deletes the node tower if we create it before
// Existing item: invoke the user functor with bNew == false, no insertion.
1496 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1497 m_Stat.onUpdateExist();
1508 build_node( pNode );
1509 nHeight = pNode->height();
// New item: the functor is adapted so that item and val refer to the same object (bNew == true).
1514 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1515 m_Stat.onInsertRetry();
1519 increase_height( nHeight );
1522 m_Stat.onAddNode( nHeight );
1523 m_Stat.onUpdateNew();
1532 // Deprecated, use update().
// Thin forwarding wrapper kept for backward compatibility; always allows insertion.
1533 template <typename Func>
1534 std::pair<bool, bool> ensure( value_type& val, Func func )
1536 return update( val, func, true );
1540 /// Unlinks the item \p val from the set
1542 The function searches the item \p val in the set and unlinks it from the set
1543 if it is found and is equal to \p val.
1545 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1546 and deletes the item found. \p %unlink() searches an item by key and deletes it
1547 only if \p val is an item of that set, i.e. the pointer to item found
1548 is equal to <tt> &val </tt>.
1550 RCU \p synchronize method can be called. RCU should not be locked.
1552 The \ref disposer specified in \p Traits class template parameter is called
1553 by garbage collector \p GC asynchronously.
1555 The function returns \p true if success and \p false otherwise.
1557 bool unlink( value_type& val )
1559 check_deadlock_policy::check();
1567 if ( !find_position( val, pos, key_comparator(), false ) ) {
1568 m_Stat.onUnlinkFailed();
1572 node_type * pDel = pos.pCur;
// The node found must compare equal to val by key (sanity check in debug builds).
1573 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1575 unsigned int nHeight = pDel->height();
// Identity check: remove only when the found node is exactly &val, not merely key-equal.
1577 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1579 m_Stat.onRemoveNode( nHeight );
1580 m_Stat.onUnlinkSuccess();
1584 m_Stat.onUnlinkFailed();
1593 /// Extracts the item from the set with specified \p key
1594 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1595 The function searches an item with key equal to \p key in the set,
1596 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1597 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1599 Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
1601 RCU \p synchronize method can be called. RCU should NOT be locked.
1602 The function does not call the disposer for the item found.
1603 The disposer will be implicitly invoked when the returned object is destroyed or when
1604 its \p release() member function is called.
1607 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1611 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1616 // Dispose returned item.
1621 template <typename Q>
1622 exempt_ptr extract( Q const& key )
1624 return exempt_ptr( do_extract( key ));
1627 /// Extracts the item from the set with comparing functor \p pred
1629 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1630 \p Less has the interface like \p std::less.
1631 \p pred must imply the same element order as the comparator used for building the set.
1633 template <typename Q, typename Less>
1634 exempt_ptr extract_with( Q const& key, Less pred )
1636 return exempt_ptr( do_extract_with( key, pred ));
1639 /// Extracts an item with minimal key from the list
1641 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1642 If the skip-list is empty the function returns an empty \p exempt_ptr.
1644 RCU \p synchronize method can be called. RCU should NOT be locked.
1645 The function does not call the disposer for the item found.
1646 The disposer will be implicitly invoked when the returned object is destroyed or when
1647 its \p release() member function is manually called.
1650 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1654 typename skip_list::exempt_ptr ep(theList.extract_min());
1659 // Dispose returned item.
1664 @note Due to the concurrent nature of the list, the function extracts a <i>nearly</i> minimum key.
1665 It means that the function gets leftmost item and tries to unlink it.
1666 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1667 So, the function returns the item with minimum key at the moment of list traversing.
1669 exempt_ptr extract_min()
1671 return exempt_ptr( do_extract_min());
1674 /// Extracts an item with maximal key from the list
1676 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1677 If the skip-list is empty the function returns an empty \p exempt_ptr.
1679 RCU \p synchronize method can be called. RCU should NOT be locked.
1680 The function does not call the disposer for the item found.
1681 The disposer will be implicitly invoked when the returned object is destroyed or when
1682 its \p release() member function is manually called.
1685 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1689 typename skip_list::exempt_ptr ep( theList.extract_max() );
1693 // Dispose returned item.
1698 @note Due to the concurrent nature of the list, the function extracts a <i>nearly</i> maximal key.
1699 It means that the function gets rightmost item and tries to unlink it.
1700 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1701 So, the function returns the item with maximum key at the moment of list traversing.
1703 exempt_ptr extract_max()
1705 return exempt_ptr( do_extract_max());
1708 /// Deletes the item from the set
1709 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1710 The function searches an item with key equal to \p key in the set,
1711 unlinks it from the set, and returns \p true.
1712 If the item with key equal to \p key is not found the function returns \p false.
1714 Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
1716 RCU \p synchronize method can be called. RCU should not be locked.
1718 template <typename Q>
1719 bool erase( const Q& key )
// Delegates to do_erase with the default comparator and a no-op pre-dispose functor.
1721 return do_erase( key, key_comparator(), [](value_type const&) {} );
1724 /// Deletes the item from the set with comparing functor \p pred
1726 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1727 but \p pred predicate is used for key comparing.
1728 \p Less has the interface like \p std::less.
1729 \p pred must imply the same element order as the comparator used for building the set.
1731 template <typename Q, typename Less>
1732 bool erase_with( const Q& key, Less pred )
// Note: the pred value itself is not used; only its type Less builds the comparator below.
1735 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1738 /// Deletes the item from the set
1739 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1740 The function searches an item with key equal to \p key in the set,
1741 calls \p f functor with item found, unlinks it from the set, and returns \p true.
1742 The \ref disposer specified in \p Traits class template parameter is called
1743 by garbage collector \p GC asynchronously.
1745 The \p Func interface is
1748 void operator()( value_type const& item );
1751 If the item with key equal to \p key is not found the function returns \p false.
1753 Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
1755 RCU \p synchronize method can be called. RCU should not be locked.
1757 template <typename Q, typename Func>
1758 bool erase( Q const& key, Func f )
1760 return do_erase( key, key_comparator(), f );
1763 /// Deletes the item from the set with comparing functor \p pred
1765 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1766 but \p pred predicate is used for key comparing.
1767 \p Less has the interface like \p std::less.
1768 \p pred must imply the same element order as the comparator used for building the set.
1770 template <typename Q, typename Less, typename Func>
1771 bool erase_with( Q const& key, Less pred, Func f )
// Note: the pred value itself is not used; only its type Less builds the comparator below.
1774 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1778 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1779 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1780 The interface of \p Func functor is:
1783 void operator()( value_type& item, Q& key );
1786 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1788 The functor can change non-key fields of \p item. Note that it is only guaranteed
1789 that \p item cannot be disposed while the functor is executing.
1790 The functor does not serialize simultaneous access to the set \p item. If such access is
1791 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1793 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1794 can modify both arguments.
1796 The function applies RCU lock internally.
1798 The function returns \p true if \p key is found, \p false otherwise.
1800 template <typename Q, typename Func>
1801 bool find( Q& key, Func f )
1803 return do_find_with( key, key_comparator(), f );
// Overload for const keys; behavior is identical to find(Q&, Func).
1806 template <typename Q, typename Func>
1807 bool find( Q const& key, Func f )
1809 return do_find_with( key, key_comparator(), f );
1813 /// Finds the key \p key with comparing functor \p pred
1815 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1816 but \p pred is used for key comparison.
1817 \p Less functor has the interface like \p std::less.
1818 \p pred must imply the same element order as the comparator used for building the set.
1820 template <typename Q, typename Less, typename Func>
1821 bool find_with( Q& key, Less pred, Func f )
// Note: the pred value itself is not used; only its type Less builds the comparator below.
1824 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1827 template <typename Q, typename Less, typename Func>
1828 bool find_with( Q const& key, Less pred, Func f )
1831 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1835 /// Checks whether the set contains \p key
1837 The function searches the item with key equal to \p key
1838 and returns \p true if it is found, and \p false otherwise.
1840 The function applies RCU lock internally.
1842 template <typename Q>
1843 bool contains( Q const& key )
// A no-op functor is passed: only the presence of the key matters.
1845 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1848 // Deprecated, use contains()
1849 template <typename Q>
1850 bool find( Q const& key )
1852 return contains( key );
1856 /// Checks whether the set contains \p key using \p pred predicate for searching
1858 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1859 \p Less functor has the interface like \p std::less.
1860 \p Less must imply the same element order as the comparator used for building the set.
1862 template <typename Q, typename Less>
1863 bool contains( Q const& key, Less pred )
// Note: the pred value itself is not used; only its type Less builds the comparator below.
1866 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1869 // Deprecated, use contains()
1870 template <typename Q, typename Less>
1871 bool find_with( Q const& key, Less pred )
1873 return contains( key, pred );
1877 /// Finds \p key and returns the item found
1878 /** \anchor cds_intrusive_SkipListSet_rcu_get
1879 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1880 If \p key is not found it returns empty \p raw_ptr.
1882 Note the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
1884 RCU should be locked before call of this function.
1885 Returned item is valid only while RCU is locked:
1887 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1890 typename skip_list::raw_ptr pVal;
1893 skip_list::rcu_lock lock;
1895 pVal = theList.get( 5 );
1901 // You can manually release pVal after RCU-locked section
1905 template <typename Q>
1906 raw_ptr get( Q const& key )
1908 assert( gc::is_locked());
// Initialize to nullptr (as get_with() does): pFound is only assigned inside the
// find functor, and leaving it indeterminate trips -Wmaybe-uninitialized and is
// inconsistent with the sibling overload.
1911 value_type * pFound = nullptr;
1912 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1913 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1914 return raw_ptr( raw_ptr_disposer( pos ));
1917 /// Finds \p key and returns the item found
1919 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1920 but \p pred is used for comparing the keys.
1922 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1924 \p pred must imply the same element order as the comparator used for building the set.
1926 template <typename Q, typename Less>
1927 raw_ptr get_with( Q const& key, Less pred )
1930 assert( gc::is_locked());
// Note: the pred value itself is not used; only its type Less builds the comparator below.
1932 value_type * pFound = nullptr;
1934 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1935 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1937 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1939 return raw_ptr( raw_ptr_disposer( pos ));
1942 /// Returns item count in the set
1944 The value returned depends on item counter type provided by \p Traits template parameter.
1945 For \p atomicity::empty_item_counter the function always returns 0.
1946 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1947 member function for this purpose.
1951 return m_ItemCounter;
1954 /// Checks if the set is empty
// Empty iff the head node of the bottom (level 0) list has no successor.
// NOTE(review): a relaxed load appears intentional here — the result is inherently
// approximate under concurrent modification; confirm against the memory_model policy.
1957 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1960 /// Clears the set (not atomic)
1962 The function unlinks all items from the set.
1963 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1967 assert( set.empty() );
1969 the assertion could be raised.
1971 For each item the \p disposer will be called automatically after unlinking.
// Drain the set by repeatedly extracting the leftmost item until extraction fails.
1976 while ( (ep = extract_min()) );
1979 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1980 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
1982 return c_nMaxHeight;
1985 /// Returns const reference to internal statistics
// Accessor for the stat object used by the m_Stat.onXxx() counters above.
1986 stat const& statistics() const
1992 }} // namespace cds::intrusive
1995 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H