3 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
4 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
8 #include <cds/intrusive/details/skip_list_base.h>
9 #include <cds/opt/compare.h>
10 #include <cds/urcu/details/check_deadlock.h>
11 #include <cds/details/binary_functor_wrapper.h>
12 #include <cds/urcu/exempt_ptr.h>
13 #include <cds/urcu/raw_ptr.h>
14 #include <cds/intrusive/details/raw_ptr_disposer.h>
16 namespace cds { namespace intrusive {
// Skip-list node specialized for cds::urcu::gc<RCU>: bottom-list link + optional
// tower of higher-level links, plus a per-thread deleted-node chain for deferred
// reclamation under RCU.
// NOTE(review): the embedded original line numbers show gaps — braces, access
// specifiers and (presumably) #ifdef _DEBUG guards around m_bLinked/m_bUnlinked
// were lost in this extract; restore them from the pristine header before compiling.
21 template <class RCU, typename Tag>
22 class node< cds::urcu::gc< RCU >, Tag >
25 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
26 typedef Tag tag ; ///< tag
29 // bit 0 - the item is logically deleted
30 // bit 1 - the item is extracted (only for level 0)
31 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
32 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
33 typedef atomic_marked_ptr tower_item_type;
36 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
38 node * m_pDelChain; ///< Deleted node chain (local for a thread)
// Debug-only linkage flags — assumes these are under #ifdef _DEBUG in the full file (TODO confirm)
40 bool volatile m_bLinked;
41 bool volatile m_bUnlinked;
44 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
45 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
48 /// Constructs a node of height 1 (a bottom-list node)
// Constructor initializer fragment: all link fields start null/false, height 1.
51 , m_pDelChain( nullptr )
54 , m_bUnlinked( false )
57 , m_arrNext( nullptr )
// Destructor fragment: a node that was ever linked must have been unlinked first.
63 assert( !m_bLinked || m_bUnlinked );
67 /// Constructs a node of height \p nHeight
68 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
70 assert( nHeight > 0 );
71 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
72 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
75 m_arrNext = nextTower;
// Releases ownership of the tower array to the caller.
// NOTE(review): body elided here — presumably also resets m_arrNext/m_nHeight; verify upstream.
79 atomic_marked_ptr * release_tower()
81 atomic_marked_ptr * pTower = m_arrNext;
87 atomic_marked_ptr * get_tower() const
// clear_tower fragment: resets the next pointers of levels 1 .. m_nHeight-1.
94 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
95 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
98 /// Access to element of next pointer array
99 atomic_marked_ptr& next( unsigned int nLevel )
101 assert( nLevel < height() );
102 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
104 return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
107 /// Access to element of next pointer array (const version)
108 atomic_marked_ptr const& next( unsigned int nLevel ) const
110 assert( nLevel < height() );
111 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
113 return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
116 /// Access to element of next pointer array (same as \ref next function)
117 atomic_marked_ptr& operator[]( unsigned int nLevel )
119 return next( nLevel );
122 /// Access to element of next pointer array (same as \ref next function)
123 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
125 return next( nLevel );
128 /// Height of the node
129 unsigned int height() const
134 /// Clears internal links
// clear() fragment: only legal for a bottom-list node (no tower attached);
// resets the level-0 link and the deleted-node chain pointer.
137 assert( m_arrNext == nullptr );
138 m_pNext.store( marked_ptr(), atomics::memory_order_release );
139 m_pDelChain = nullptr;
// Debug helper: true when the node carries no links and no deletion chain.
142 bool is_cleared() const
144 return m_pNext == atomic_marked_ptr()
145 && m_arrNext == nullptr
149 } // namespace skip_list
153 namespace skip_list { namespace details {
// Forward iterator over the bottom-level list, valid only while the caller
// holds the RCU lock. Skips (and spin-waits past) logically-deleted nodes.
// NOTE(review): several ctor/operator bodies are elided in this extract
// (gaps in the embedded line numbers) — restore from the pristine header.
155 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
156 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
159 typedef cds::urcu::gc< RCU > gc;
160 typedef NodeTraits node_traits;
161 typedef BackOff back_off;
162 typedef typename node_traits::node_type node_type;
163 typedef typename node_traits::value_type value_type;
164 static bool const c_isConst = IsConst;
166 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
169 typedef typename node_type::marked_ptr marked_ptr;
170 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
// Advance helper fragment: a node is logically deleted iff its topmost link is marked.
177 // RCU should be locked before iterating!!!
178 assert( gc::is_locked() );
183 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
184 // Current node is marked as deleted. So, its next pointer can point to anything
185 // In this case we interrupt our iteration and return the end() iterator.
190 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
191 node_type * pp = p.ptr();
193 // p is marked as deleted. Spin waiting for physical removal
197 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
198 // p is marked as deleted. Spin waiting for physical removal
208 public: // for internal use only!!!
// Constructs begin()-style iterator from the head tower.
209 iterator( node_type& refHead )
212 // RCU should be locked before iterating!!!
213 assert( gc::is_locked() );
218 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
224 node_type * pp = p.ptr();
225 // Logically deleted node is marked from highest level
226 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
// Default ctor fragment (produces end() iterator).
239 // RCU should be locked before iterating!!!
240 assert( gc::is_locked() );
243 iterator( iterator const& s)
244 : m_pNode( s.m_pNode )
246 // RCU should be locked before iterating!!!
247 assert( gc::is_locked() );
// Dereference: must not be called on end(); maps node back to its value.
250 value_type * operator ->() const
252 assert( m_pNode != nullptr );
253 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
255 return node_traits::to_value_ptr( m_pNode );
258 value_ref operator *() const
260 assert( m_pNode != nullptr );
261 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
263 return *node_traits::to_value_ptr( m_pNode );
// Pre-increment fragment (body elided in this extract).
267 iterator& operator ++()
273 iterator& operator = (const iterator& src)
275 m_pNode = src.m_pNode;
// Comparison is by node pointer identity; comparable across const-ness/back-off.
279 template <typename Bkoff, bool C>
280 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
282 return m_pNode == i.m_pNode;
284 template <typename Bkoff, bool C>
285 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
287 return !( *this == i );
290 }} // namespace skip_list::details
293 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
294 /** @ingroup cds_intrusive_map
295 @anchor cds_intrusive_SkipListSet_rcu
297 The implementation of well-known probabilistic data structure called skip-list
298 invented by W.Pugh in his papers:
299 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
300 - [1990] W.Pugh A Skip List Cookbook
302 A skip-list is a probabilistic data structure that provides expected logarithmic
303 time search without the need of rebalance. The skip-list is a collection of sorted
304 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
305 Each list has a level, ranging from 0 to 32. The bottom-level list contains
306 all the nodes, and each higher-level list is a sublist of the lower-level lists.
307 Each node is created with a random top level (with a random height), and belongs
308 to all lists up to that level. The probability that a node has the height 1 is 1/2.
309 The probability that a node has the height N is 1/2 ** N (more precisely,
310 the distribution depends on a random generator provided, but our generators
313 The lock-free variant of skip-list is implemented according to book
314 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
315 chapter 14.4 "A Lock-Free Concurrent Skiplist".
317 <b>Template arguments</b>:
318 - \p RCU - one of \ref cds_urcu_gc "RCU type"
319 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
320 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
321 - \p Traits - set traits, default is \p skip_list::traits
322 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
323 instead of \p Traits template argument.
325 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
326 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
330 The class supports a forward iterator (\ref iterator and \ref const_iterator).
331 The iteration is ordered.
333 You may iterate over skip-list set items only under RCU lock.
334 Only in this case the iterator is thread-safe since
335 while RCU is locked any set's item cannot be reclaimed.
337 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
338 (i.e. inserting, erasing and so on) is not possible.
340 @warning The iterator object cannot be passed between threads.
342 Example how to use skip-list set iterators:
344 // First, you should include the header for RCU type you have chosen
345 #include <cds/urcu/general_buffered.h>
346 #include <cds/intrusive/skip_list_rcu.h>
348 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
354 // Traits for your skip-list.
355 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
356 struct my_traits: public cds::intrusive::skip_list::traits
360 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
362 my_skiplist_set theSet;
368 // Apply RCU locking manually
369 typename rcu_type::scoped_lock sl;
371 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
375 // rcu_type::scoped_lock destructor releases RCU lock implicitly
379 The iterator class supports the following minimalistic interface:
386 iterator( iterator const& s);
388 value_type * operator ->() const;
389 value_type& operator *() const;
392 iterator& operator ++();
395 iterator& operator = (const iterator& src);
397 bool operator ==(iterator const& i ) const;
398 bool operator !=(iterator const& i ) const;
401 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
405 You should incorporate skip_list::node into your struct \p T and provide
406 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
407 define a struct based on \p skip_list::traits.
409 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
411 // First, you should include the header for RCU type you have chosen
412 #include <cds/urcu/general_buffered.h>
414 // Include RCU skip-list specialization
415 #include <cds/intrusive/skip_list_rcu.h>
418 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
420 // Data stored in skip list
421 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
430 // my_data compare functor
432 int operator()( const my_data& d1, const my_data& d2 )
434 return d1.strKey.compare( d2.strKey );
437 int operator()( const my_data& d, const std::string& s )
439 return d.strKey.compare(s);
442 int operator()( const std::string& s, const my_data& d )
444 return s.compare( d.strKey );
449 struct my_traits: public cds::intrusive::skip_list::traits
451 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
452 typedef my_data_cmp compare;
455 // Declare skip-list set type
456 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
459 Equivalent option-based code:
461 #include <cds/urcu/general_buffered.h>
462 #include <cds/intrusive/skip_list_rcu.h>
464 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
473 // Declare option-based skip-list set
474 typedef cds::intrusive::SkipListSet< rcu_type
476 , typename cds::intrusive::skip_list::make_traits<
477 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
478 ,cds::intrusive::opt::compare< my_data_cmp >
487 #ifdef CDS_DOXYGEN_INVOKED
488 ,typename Traits = skip_list::traits
493 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
// Public type plumbing of the RCU SkipListSet specialization: everything is
// pulled from Traits so behavior is fully policy-driven.
496 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
497 typedef T value_type; ///< type of value stored in the skip-list
498 typedef Traits traits; ///< Traits template parameter
500 typedef typename traits::hook hook; ///< hook type
501 typedef typename hook::node_type node_type; ///< node type
503 # ifdef CDS_DOXYGEN_INVOKED
504 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
506 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
509 typedef typename traits::disposer disposer; ///< disposer
510 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
512 typedef typename traits::item_counter item_counter; ///< Item counting policy used
513 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
514 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
515 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
516 typedef typename traits::back_off back_off; ///< Back-off strategy
517 typedef typename traits::stat stat; ///< internal statistics type
518 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
519 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
520 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
523 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
525 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
526 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Clamp the generator's upper bound to the library-wide height limit at compile time.
528 static unsigned int const c_nMaxHeight = std::conditional<
529 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
530 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
531 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
535 static unsigned int const c_nMinHeight = 5;
536 typedef cds::intrusive::skip_list::implementation_tag implementation_tag;
540 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
541 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
// Node-builder selection: use the intrusive default unless Traits overrides it.
545 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
547 typedef typename std::conditional<
548 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
549 ,intrusive_node_builder
550 ,typename traits::internal_node_builder
551 >::type node_builder;
553 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Reclamation helpers: dispose_node frees one value's tower via the node builder;
// dispose_chain batch-retires a whole m_pDelChain list through RCU.
555 static void dispose_node( value_type * pVal )
559 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
// Functor adapter so dispose_node can be used as an RCU disposer.
565 void operator()( value_type * pVal )
567 dispose_node( pVal );
// Must be called OUTSIDE the RCU lock: batch_retire may invoke synchronize().
571 static void dispose_chain( node_type * pChain )
574 assert( !gc::is_locked() );
// Generator lambda: yields each chain element as a retired_ptr, then a null
// sentinel to signal the end of the batch.
576 auto f = [&pChain]() -> cds::urcu::retired_ptr {
577 node_type * p = pChain;
579 pChain = p->m_pDelChain;
580 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
582 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
584 gc::batch_retire(std::ref(f));
// Search-position snapshot used by find_*/insert/remove: predecessor and
// successor per level, plus a chain of nodes whose reclamation is deferred
// until the position object is destroyed (outside the RCU lock).
589 node_type * pPrev[ c_nMaxHeight ];
590 node_type * pSucc[ c_nMaxHeight ];
591 node_type * pNext[ c_nMaxHeight ];
594 node_type * pDelChain;
597 : pDelChain( nullptr )
// Destructor fragment: frees every node linked into pDelChain.
602 dispose_chain( pDelChain );
// Links an unlinked node into the deferred-deletion chain.
605 void dispose( node_type * p )
607 assert( p != nullptr );
608 assert( p->m_pDelChain == nullptr );
610 p->m_pDelChain = pDelChain;
// Data members of the set; m_nHeight is only an estimate used to shorten
// the fast-path search, not a strict invariant.
615 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
619 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
621 item_counter m_ItemCounter; ///< item counter
622 random_level_generator m_RandomLevelGen; ///< random level generator instance
623 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
624 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
625 mutable stat m_Stat; ///< internal statistics
629 unsigned int random_level()
631 // Random generator produces a number from range [0..31]
632 // We need a number from range [1..32]
633 return m_RandomLevelGen() + 1;
636 template <typename Q>
637 node_type * build_node( Q v )
639 return node_builder::make_tower( v, m_RandomLevelGen );
// Smart-pointer plumbing for extract()/get(): exempt_ptr owns an extracted
// value; raw_ptr additionally carries a deferred-deletion chain disposed on release.
644 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
// Disposes a whole deferred chain when a raw_ptr is released.
648 struct chain_disposer {
649 void operator()( node_type * pChain ) const
651 dispose_chain( pChain );
654 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
658 /// Result of \p get(), \p get_with() functions - pointer to the node found
659 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
664 bool is_extracted( marked_node_ptr const p ) const
666 return (p.bits() & 2) != 0;
// Core lock-free search: fills pos.pPrev/pos.pSucc for every level and helps
// physically unlink marked nodes along the way. Requires the RCU lock.
// Returns true iff a node equal to val was found (pos.pCur points to it).
// NOTE(review): retry/goto lines and some braces are elided in this extract.
669 template <typename Q, typename Compare >
670 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
672 assert( gc::is_locked() );
675 marked_node_ptr pSucc;
676 marked_node_ptr pCur;
// Restart point: begin each (re)scan from the head tower.
680 pPred = m_Head.head();
682 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
685 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
687 // pCur.bits() means that pPred is logically deleted
691 if ( pCur.ptr() == nullptr ) {
692 // end of the list at level nLevel - goto next level
696 // pSucc contains deletion mark for pCur
697 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Validate that pPred still links to pCur; otherwise restart the search.
699 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
702 if ( pSucc.bits() ) {
703 // pCur is marked, i.e. logically deleted.
// Help removal: splice pCur out of this level with a CAS on pPred's link.
704 marked_node_ptr p( pCur.ptr() );
705 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
706 memory_model::memory_order_release, atomics::memory_order_relaxed ))
710 pCur->m_bUnlinked = true;
713 if ( !is_extracted( pSucc )) {
714 // We cannot free the node at this moment since RCU is locked
715 // Link deleted nodes to a chain to free later
716 pos.dispose( pCur.ptr() );
717 m_Stat.onEraseWhileFind();
720 m_Stat.onExtractWhileFind();
// Ordered descent: move right while pCur < val, stop early on exact match if requested.
727 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
730 else if ( nCmp == 0 && bStopIfFound )
// Record the insert/remove window for this level before descending.
738 pos.pPrev[ nLevel ] = pPred;
739 pos.pSucc[ nLevel ] = pCur.ptr();
746 pos.pCur = pCur.ptr();
747 return pCur.ptr() && nCmp == 0;
// Locates the leftmost (minimum) live node, filling pos like find_position.
// Requires the RCU lock; helps unlink marked nodes while descending.
750 bool find_min_position( position& pos )
752 assert( gc::is_locked() );
755 marked_node_ptr pSucc;
756 marked_node_ptr pCur;
759 pPred = m_Head.head();
761 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
763 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
764 // pCur.bits() means that pPred is logically deleted
765 // head cannot be deleted
766 assert( pCur.bits() == 0 );
770 // pSucc contains deletion mark for pCur
771 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Validate pPred->pCur link is still intact; otherwise restart.
773 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
776 if ( pSucc.bits() ) {
777 // pCur is marked, i.e. logically deleted.
778 marked_node_ptr p( pCur.ptr() );
779 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
780 memory_model::memory_order_release, atomics::memory_order_relaxed ))
784 pCur->m_bUnlinked = true;
787 if ( !is_extracted( pSucc )) {
788 // We cannot free the node at this moment since RCU is locked
789 // Link deleted nodes to a chain to free later
790 pos.dispose( pCur.ptr() );
791 m_Stat.onEraseWhileFind();
794 m_Stat.onExtractWhileFind();
// The first unmarked successor of the head at each level is the candidate minimum.
803 pos.pPrev[ nLevel ] = pPred;
804 pos.pSucc[ nLevel ] = pCur.ptr();
// Returns false only when the list is empty.
806 return (pos.pCur = pCur.ptr()) != nullptr;
// Locates the rightmost (maximum) live node, filling pos like find_position.
// Requires the RCU lock; helps unlink marked nodes while descending.
809 bool find_max_position( position& pos )
811 assert( gc::is_locked() );
814 marked_node_ptr pSucc;
815 marked_node_ptr pCur;
818 pPred = m_Head.head();
820 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
823 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
825 // pCur.bits() means that pPred is logically deleted
829 if ( pCur.ptr() == nullptr ) {
830 // end of the list at level nLevel - goto next level
834 // pSucc contains deletion mark for pCur
835 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Validate pPred->pCur link is still intact; otherwise restart.
837 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
840 if ( pSucc.bits() ) {
841 // pCur is marked, i.e. logically deleted.
842 marked_node_ptr p( pCur.ptr() );
843 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
844 memory_model::memory_order_release, atomics::memory_order_relaxed ))
848 pCur->m_bUnlinked = true;
851 if ( !is_extracted( pSucc )) {
852 // We cannot free the node at this moment since RCU is locked
853 // Link deleted nodes to a chain to free later
854 pos.dispose( pCur.ptr() );
855 m_Stat.onEraseWhileFind();
858 m_Stat.onExtractWhileFind();
// The last node reached before a null link at each level bounds the maximum.
873 pos.pPrev[ nLevel ] = pPred;
874 pos.pSucc[ nLevel ] = pCur.ptr();
877 return (pos.pCur = pCur.ptr()) != nullptr;
// Links pNode into the list at the window recorded in pos: level 0 first
// (the linearization point), then the upper levels, renewing the position
// on CAS failure. Calls f on success. Requires the RCU lock.
880 template <typename Func>
881 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
883 assert( gc::is_locked() );
885 unsigned int nHeight = pNode->height();
886 pNode->clear_tower();
// Link at level 0: success here makes the node logically present.
889 marked_node_ptr p( pos.pSucc[0] );
890 pNode->next( 0 ).store( p, memory_model::memory_order_release );
891 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
895 pNode->m_bLinked = true;
// Link the upper levels one by one; each may race with concurrent deletion.
900 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
903 marked_node_ptr q( pos.pSucc[ nLevel ]);
904 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
905 // pNode has been marked as removed while we are inserting it
908 m_Stat.onLogicDeleteWhileInsert();
912 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
915 // Renew insert position
916 m_Stat.onRenewInsertPosition();
917 if ( !find_position( val, pos, key_comparator(), false )) {
918 // The node has been deleted while we are inserting it
919 m_Stat.onNotFoundWhileInsert();
// Two-phase removal of pDel: mark every level top-down (level 0 last — that
// CAS is the linearization point; bExtract additionally sets the "extracted"
// bit), then physically unlink, falling back to find_position on contention.
// Requires the RCU lock.
927 template <typename Func>
928 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
930 assert( pDel != nullptr );
931 assert( gc::is_locked() );
933 marked_node_ptr pSucc;
935 // logical deletion (marking)
936 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
937 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
940 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
// Level 0 marking decides ownership of the removal.
947 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
952 int const nMask = bExtract ? 3 : 1;
953 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
955 f( *node_traits::to_value_ptr( pDel ));
// Fast physical unlink: swing each predecessor past pDel; on any failure,
// delegate the cleanup to a full find_position (slow path).
960 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
961 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
962 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
963 memory_model::memory_order_release, atomics::memory_order_relaxed) )
966 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
968 m_Stat.onSlowExtract();
970 m_Stat.onSlowErase();
972 assert( pDel->m_bUnlinked );
979 pDel->m_bUnlinked = true;
982 // We cannot free the node at this moment since RCU is locked
983 // Link deleted nodes to a chain to free later
985 m_Stat.onFastErase();
988 m_Stat.onFastExtract();
// Read-only fast-path search: no helping, no position bookkeeping; aborts to
// the slow path if it keeps meeting marked nodes.
// NOTE(review): "finsd_fastpath_result" looks like a typo for
// "find_fastpath_result" — rename the type together with its uses.
995 enum finsd_fastpath_result {
997 find_fastpath_not_found,
1000 template <typename Q, typename Compare, typename Func>
1001 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1004 marked_node_ptr pCur;
1005 marked_node_ptr pSucc;
1006 marked_node_ptr pNull;
1010 pPred = m_Head.head();
// Start from the estimated height, not c_nMaxHeight, to skip empty levels.
1011 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1012 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1013 if ( pCur == pNull )
1016 while ( pCur != pNull ) {
1017 if ( pCur.bits() ) {
1018 // Wait until pCur is removed
// Bounded spin (16 attempts) for physical removal before giving up.
1019 unsigned int nAttempt = 0;
1020 while ( pCur.bits() && nAttempt++ < 16 ) {
1022 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1026 if ( pCur.bits() ) {
1027 // Maybe, we are on deleted node sequence
1028 // Abort searching, try slow-path
1029 return find_fastpath_abort;
// Ordered walk: right while pCur < val, report on equality, down otherwise.
1034 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1037 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1039 else if ( nCmp == 0 ) {
1041 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1042 return find_fastpath_found;
1044 else // pCur > val - go down
1050 return find_fastpath_not_found;
// Slow-path search via find_position (helps removal, fills pos); applies f
// to the found item.
1053 template <typename Q, typename Compare, typename Func>
1054 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1056 if ( find_position( val, pos, cmp, true )) {
1057 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1059 f( *node_traits::to_value_ptr( pos.pCur ), val );
// Find driver: try the read-only fast path first, fall back to the helping
// slow path on abort. The 3-arg overload supplies a local position.
1066 template <typename Q, typename Compare, typename Func>
1067 bool do_find_with( Q& val, Compare cmp, Func f )
1070 return do_find_with( val, cmp, f, pos );
1073 template <typename Q, typename Compare, typename Func>
1074 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1081 switch ( find_fastpath( val, cmp, f )) {
1082 case find_fastpath_found:
1083 m_Stat.onFindFastSuccess();
1085 case find_fastpath_not_found:
1086 m_Stat.onFindFastFailed();
// find_fastpath_abort falls through to the slow path below.
1092 if ( find_slowpath( val, cmp, f, pos )) {
1093 m_Stat.onFindSlowSuccess();
1097 m_Stat.onFindSlowFailed();
// Erase driver: locate the node, then try_remove_at with bExtract=false so
// the node is reclaimed via the deferred chain. Checks RCU deadlock policy.
1104 template <typename Q, typename Compare, typename Func>
1105 bool do_erase( Q const& val, Compare cmp, Func f )
1107 check_deadlock_policy::check();
1115 if ( !find_position( val, pos, cmp, false ) ) {
1116 m_Stat.onEraseFailed();
1120 node_type * pDel = pos.pCur;
1121 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
// Height captured before removal for statistics.
1123 unsigned int nHeight = pDel->height();
1124 if ( try_remove_at( pDel, pos, f, false )) {
1126 m_Stat.onRemoveNode( nHeight );
1127 m_Stat.onEraseSuccess();
// try_remove_at lost the race: another thread removed the node first.
1131 m_Stat.onEraseFailed();
// Extract by key under an already-held RCU lock: removes with bExtract=true
// so ownership passes to the caller instead of the deferred-deletion chain.
1140 template <typename Q, typename Compare>
1141 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1143 // RCU should be locked!!!
1144 assert( gc::is_locked() );
1148 if ( !find_position( key, pos, cmp, false ) ) {
1149 m_Stat.onExtractFailed();
1154 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1156 unsigned int const nHeight = pDel->height();
// No-op functor: extraction does not run user code on the removed value.
1158 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1160 m_Stat.onRemoveNode( nHeight );
1161 m_Stat.onExtractSuccess();
1164 m_Stat.onExtractFailed();
1169 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extract wrapper using the default key comparator; acquires the RCU lock
// internally (elided lines) around do_extract_key.
1172 template <typename Q>
1173 value_type * do_extract( Q const& key )
1175 check_deadlock_policy::check();
1176 value_type * pDel = nullptr;
1180 pDel = do_extract_key( key, key_comparator(), pos );
// Extract wrapper taking a Less predicate, converted to a three-way
// comparator for do_extract_key.
1186 template <typename Q, typename Less>
1187 value_type * do_extract_with( Q const& key, Less pred )
1190 check_deadlock_policy::check();
1191 value_type * pDel = nullptr;
1195 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// Extracts the minimum element: must be called without the RCU lock held
// (the lock is taken internally in elided lines).
1201 value_type * do_extract_min()
1203 assert( !gc::is_locked() );
1211 if ( !find_min_position( pos ) ) {
1212 m_Stat.onExtractMinFailed();
1217 unsigned int const nHeight = pDel->height();
1219 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1221 m_Stat.onRemoveNode( nHeight );
1222 m_Stat.onExtractMinSuccess();
// Lost the race: retry loop (elided) restarts from find_min_position.
1225 m_Stat.onExtractMinFailed();
1231 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extracts the maximum element: must be called without the RCU lock held
// (the lock is taken internally in elided lines).
1234 value_type * do_extract_max()
1236 assert( !gc::is_locked() );
1244 if ( !find_max_position( pos ) ) {
1245 m_Stat.onExtractMaxFailed();
1250 unsigned int const nHeight = pDel->height();
1252 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1254 m_Stat.onRemoveNode( nHeight );
1255 m_Stat.onExtractMaxSuccess();
// Lost the race: retry loop (elided) restarts from find_max_position.
1258 m_Stat.onExtractMaxFailed();
1264 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1267 void increase_height( unsigned int nHeight )
1269 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1270 if ( nCur < nHeight )
1271 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
1276 /// Default constructor
// Initializer fragment: max-height head tower, minimal estimated height,
// empty deferred-deletion chain.
1278 : m_Head( c_nMaxHeight )
1279 , m_nHeight( c_nMinHeight )
1280 , m_pDeferredDelChain( nullptr )
1282 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1284 // Barrier for head node
// Publish the fully-constructed head tower to other threads.
1285 atomics::atomic_thread_fence( memory_model::memory_order_release );
1288 /// Clears and destructs the skip-list
// Iterator types and begin/end accessors. All iteration requires the caller
// to hold the RCU lock; end()/cend() wrap a null node and must not be
// dereferenced. NOTE(review): dtor and non-const begin()/end() signatures are
// elided in this extract.
1296 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1298 /// Const iterator type
1299 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1301 /// Returns a forward iterator addressing the first element in a set
1304 return iterator( *m_Head.head() );
1307 /// Returns a forward const iterator addressing the first element in a set
1308 const_iterator begin() const
1310 return const_iterator( *m_Head.head() );
1313 /// Returns a forward const iterator addressing the first element in a set
1314 const_iterator cbegin() const
1316 return const_iterator( *m_Head.head() );
1319 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
1325 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1326 const_iterator end() const
1328 return const_iterator();
1331 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1332 const_iterator cend() const
1334 return const_iterator();
1338 /// Inserts new node
1340 The function inserts \p val in the set if it does not contain
1341 an item with key equal to \p val.
1343 The function applies RCU lock internally.
1345 Returns \p true if \p val is placed into the set, \p false otherwise.
1347 bool insert( value_type& val )
1349 return insert( val, []( value_type& ) {} );
1352 /// Inserts new node
1354 This function is intended for derived non-intrusive containers.
1356 The function allows to split creating of new item into two part:
1357 - create item with key only
1358 - insert new item into the set
1359 - if inserting succeeds, calls \p f functor to initialize value-field of \p val.
1361 The functor signature is:
1363 void func( value_type& val );
1365 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1366 \p val no any other changes could be made on this set's item by concurrent threads.
1367 The user-defined functor is called only if the inserting succeeds.
1369 RCU \p synchronize method can be called. RCU should not be locked.
1371 template <typename Func>
1372 bool insert( value_type& val, Func f )
1374 check_deadlock_policy::check();
// Wrap the node in scoped_node_ptr so a freshly built tower is freed on failure.
1380 node_type * pNode = node_traits::to_node_ptr( val );
1381 scoped_node_ptr scp( pNode );
1382 unsigned int nHeight = pNode->height();
1383 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1384 bool bTowerMade = false;
// Retry loop (elided): search, build the tower lazily, attempt linking.
1390 bool bFound = find_position( val, pos, key_comparator(), true );
1392 // scoped_node_ptr deletes the node tower if we create it
1396 m_Stat.onInsertFailed();
// Build the random-height tower only after confirming the key is absent.
1402 build_node( pNode );
1403 nHeight = pNode->height();
1408 if ( !insert_at_position( val, pNode, pos, f )) {
1409 m_Stat.onInsertRetry();
1413 increase_height( nHeight );
1415 m_Stat.onAddNode( nHeight );
1416 m_Stat.onInsertSuccess();
/// Ensures that the \p val exists in the set
/**
    The operation performs inserting or changing data with lock-free manner.

    If the item \p val is not found in the set, then \p val is inserted into the set.
    Otherwise, the functor \p func is called with item found.
    The functor signature is:
    \code
        void func( bool bNew, value_type& item, value_type& val );
    \endcode
    with arguments:
    - \p bNew - \p true if the item has been inserted, \p false otherwise
    - \p item - item of the set
    - \p val - argument \p val passed into the \p %ensure() function
    If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
    refer to the same thing.

    The functor can change non-key fields of the \p item; however, \p func must guarantee
    that during changing no any other modifications could be made on this item by concurrent threads.

    RCU \p synchronize method can be called. RCU should not be locked.

    Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
    \p second is \p true if new item has been added or \p false if the item with \p key
    already is in the set.

    @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename Func>
std::pair<bool, bool> ensure( value_type& val, Func func )
// NOTE(review): braces, the RCU-lock scope, `position pos` and the retry loop
// are elided in this extract; only the visible statements are annotated.
// RCU must NOT be locked: synchronize() may be invoked during the operation.
check_deadlock_policy::check();
// Result: {success, inserted-as-new}; default assumes "found existing".
std::pair<bool, bool> bRet( true, false );
node_type * pNode = node_traits::to_node_ptr( val );
// Guard: disposes the node tower if we end up not inserting pNode.
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
bool bFound = find_position( val, pos, key_comparator(), true );
// scoped_node_ptr deletes the node tower if we create it before
// Existing item: report bNew == false with the item found.
func( false, *node_traits::to_value_ptr(pos.pCur), val );
m_Stat.onEnsureExist();
// Not found: build the tower (if needed) and link the new node.
build_node( pNode );
nHeight = pNode->height();
// On successful insert the functor sees bNew == true and item == val.
if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
m_Stat.onInsertRetry();    // linking raced with a concurrent update - retried
increase_height( nHeight );
m_Stat.onAddNode( nHeight );
m_Stat.onEnsureNew();
/// Unlinks the item \p val from the set
/**
    The function searches the item \p val in the set and unlink it from the set
    if it is found and is equal to \p val.

    Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
    and deletes the item found. \p %unlink() searches an item by key and deletes it
    only if \p val is an item of that set, i.e. the pointer to item found
    is equal to <tt> &val </tt>.

    RCU \p synchronize method can be called. RCU should not be locked.

    The \ref disposer specified in \p Traits class template parameter is called
    by garbage collector \p GC asynchronously.

    The function returns \p true if success and \p false otherwise.
*/
bool unlink( value_type& val )
// NOTE(review): braces, the RCU-lock scope, `position pos`, item-counter
// decrement and the return statements are elided in this extract.
// RCU must NOT be locked: synchronize() may be called while disposing.
check_deadlock_policy::check();
// Exact-key search; `false` - do not stop early on logically-deleted nodes.
if ( !find_position( val, pos, key_comparator(), false ) ) {
m_Stat.onUnlinkFailed();   // no item with this key
node_type * pDel = pos.pCur;
// find_position() succeeded, so the found node's key must equal val's key.
assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
// Unlink only if the found item IS &val (identity, not just key equality).
if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
m_Stat.onRemoveNode( nHeight );
m_Stat.onUnlinkSuccess();
m_Stat.onUnlinkFailed();   // found a key-equal item, but it was not &val (or removal raced)
1561 /// Extracts the item from the set with specified \p key
1562 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1563 The function searches an item with key equal to \p key in the set,
1564 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1565 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1567 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1569 RCU \p synchronize method can be called. RCU should NOT be locked.
1570 The function does not call the disposer for the item found.
1571 The disposer will be implicitly invoked when the returned object is destroyed or when
1572 its \p release() member function is called.
1575 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1579 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1584 // Dispose returned item.
1589 template <typename Q>
1590 exempt_ptr extract( Q const& key )
1592 return exempt_ptr( do_extract( key ));
1595 /// Extracts the item from the set with comparing functor \p pred
1597 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1598 \p Less has the interface like \p std::less.
1599 \p pred must imply the same element order as the comparator used for building the set.
1601 template <typename Q, typename Less>
1602 exempt_ptr extract_with( Q const& key, Less pred )
1604 return exempt_ptr( do_extract_with( key, pred ));
1607 /// Extracts an item with minimal key from the list
1609 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1610 If the skip-list is empty the function returns an empty \p exempt_ptr.
1612 RCU \p synchronize method can be called. RCU should NOT be locked.
1613 The function does not call the disposer for the item found.
1614 The disposer will be implicitly invoked when the returned object is destroyed or when
1615 its \p release() member function is manually called.
1618 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1622 typename skip_list::exempt_ptr ep(theList.extract_min());
1627 // Dispose returned item.
1632 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1633 It means that the function gets leftmost item and tries to unlink it.
1634 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1635 So, the function returns the item with minimum key at the moment of list traversing.
1637 exempt_ptr extract_min()
1639 return exempt_ptr( do_extract_min());
1642 /// Extracts an item with maximal key from the list
1644 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1645 If the skip-list is empty the function returns an empty \p exempt_ptr.
1647 RCU \p synchronize method can be called. RCU should NOT be locked.
1648 The function does not call the disposer for the item found.
1649 The disposer will be implicitly invoked when the returned object is destroyed or when
1650 its \p release() member function is manually called.
1653 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1657 typename skip_list::exempt_ptr ep( theList.extract_max() );
1661 // Dispose returned item.
1666 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1667 It means that the function gets rightmost item and tries to unlink it.
1668 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1669 So, the function returns the item with maximum key at the moment of list traversing.
1671 exempt_ptr extract_max()
1673 return exempt_ptr( do_extract_max());
1676 /// Deletes the item from the set
1677 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1678 The function searches an item with key equal to \p key in the set,
1679 unlinks it from the set, and returns \p true.
1680 If the item with key equal to \p key is not found the function return \p false.
1682 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1684 RCU \p synchronize method can be called. RCU should not be locked.
1686 template <typename Q>
1687 bool erase( const Q& key )
1689 return do_erase( key, key_comparator(), [](value_type const&) {} );
1692 /// Delete the item from the set with comparing functor \p pred
1694 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1695 but \p pred predicate is used for key comparing.
1696 \p Less has the interface like \p std::less.
1697 \p pred must imply the same element order as the comparator used for building the set.
1699 template <typename Q, typename Less>
1700 bool erase_with( const Q& key, Less pred )
1703 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1706 /// Deletes the item from the set
1707 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1708 The function searches an item with key equal to \p key in the set,
1709 call \p f functor with item found, unlinks it from the set, and returns \p true.
1710 The \ref disposer specified in \p Traits class template parameter is called
1711 by garbage collector \p GC asynchronously.
1713 The \p Func interface is
1716 void operator()( value_type const& item );
1719 If the item with key equal to \p key is not found the function return \p false.
1721 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1723 RCU \p synchronize method can be called. RCU should not be locked.
1725 template <typename Q, typename Func>
1726 bool erase( Q const& key, Func f )
1728 return do_erase( key, key_comparator(), f );
1731 /// Delete the item from the set with comparing functor \p pred
1733 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1734 but \p pred predicate is used for key comparing.
1735 \p Less has the interface like \p std::less.
1736 \p pred must imply the same element order as the comparator used for building the set.
1738 template <typename Q, typename Less, typename Func>
1739 bool erase_with( Q const& key, Less pred, Func f )
1742 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1746 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1747 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1748 The interface of \p Func functor is:
1751 void operator()( value_type& item, Q& key );
1754 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1756 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1757 that \p item cannot be disposed during functor is executing.
1758 The functor does not serialize simultaneous access to the set \p item. If such access is
1759 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1761 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1762 can modify both arguments.
1764 The function applies RCU lock internally.
1766 The function returns \p true if \p key is found, \p false otherwise.
1768 template <typename Q, typename Func>
1769 bool find( Q& key, Func f )
1771 return do_find_with( key, key_comparator(), f );
1774 template <typename Q, typename Func>
1775 bool find( Q const& key, Func f )
1777 return do_find_with( key, key_comparator(), f );
1781 /// Finds the key \p key with comparing functor \p pred
1783 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1784 but \p cmp is used for key comparison.
1785 \p Less functor has the interface like \p std::less.
1786 \p cmp must imply the same element order as the comparator used for building the set.
1788 template <typename Q, typename Less, typename Func>
1789 bool find_with( Q& key, Less pred, Func f )
1792 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1795 template <typename Q, typename Less, typename Func>
1796 bool find_with( Q const& key, Less pred, Func f )
1799 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1804 /** @anchor cds_intrusive_SkipListSet_rcu_find_val
1805 The function searches the item with key equal to \p key
1806 and returns \p true if it is found, and \p false otherwise.
1808 The function applies RCU lock internally.
1810 template <typename Q>
1811 bool find( Q const& key )
1813 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1816 /// Finds \p key with comparing functor \p pred
1818 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)"
1819 but \p pred is used for key compare.
1820 \p Less functor has the interface like \p std::less.
1821 \p pred must imply the same element order as the comparator used for building the set.
1823 template <typename Q, typename Less>
1824 bool find_with( Q const& key, Less pred )
1827 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1830 /// Finds \p key and return the item found
1831 /** \anchor cds_intrusive_SkipListSet_rcu_get
1832 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1833 If \p key is not found it returns empty \p raw_ptr.
1835 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1837 RCU should be locked before call of this function.
1838 Returned item is valid only while RCU is locked:
1840 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1843 typename skip_list::raw_ptr pVal;
1846 skip_list::rcu_lock lock;
1848 pVal = theList.get( 5 );
1854 // You can manually release pVal after RCU-locked section
1858 template <typename Q>
1859 raw_ptr get( Q const& key )
1861 assert( gc::is_locked());
1864 value_type * pFound;
1865 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1866 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1867 return raw_ptr( raw_ptr_disposer( pos ));
1870 /// Finds \p key and return the item found
1872 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1873 but \p pred is used for comparing the keys.
1875 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1877 \p pred must imply the same element order as the comparator used for building the set.
1879 template <typename Q, typename Less>
1880 raw_ptr get_with( Q const& key, Less pred )
1883 assert( gc::is_locked());
1885 value_type * pFound;
1887 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1888 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1890 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1892 return raw_ptr( raw_ptr_disposer( pos ));
1895 /// Returns item count in the set
1897 The value returned depends on item counter type provided by \p Traits template parameter.
1898 For \p atomicity::empty_item_counter the function always returns 0.
1899 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1900 member function for this purpose.
1904 return m_ItemCounter;
1907 /// Checks if the set is empty
1910 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1913 /// Clears the set (not atomic)
1915 The function unlink all items from the set.
1916 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1920 assert( set.empty() );
1922 the assertion could be raised.
1924 For each item the \p disposer will be called automatically after unlinking.
1929 while ( (ep = extract_min()) );
1932 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1933 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
1935 return c_nMaxHeight;
1938 /// Returns const reference to internal statistics
1939 stat const& statistics() const
1945 }} // namespace cds::intrusive
1948 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H