3 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
4 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
8 #include <cds/intrusive/details/skip_list_base.h>
9 #include <cds/opt/compare.h>
10 #include <cds/urcu/details/check_deadlock.h>
11 #include <cds/details/binary_functor_wrapper.h>
12 #include <cds/urcu/exempt_ptr.h>
13 #include <cds/urcu/raw_ptr.h>
14 #include <cds/intrusive/details/raw_ptr_disposer.h>
16 namespace cds { namespace intrusive {
21 template <class RCU, typename Tag>
22 class node< cds::urcu::gc< RCU >, Tag >
25 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
26 typedef Tag tag ; ///< tag
29 // bit 0 - the item is logically deleted
30 // bit 1 - the item is extracted (only for level 0)
31 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
32 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
33 typedef atomic_marked_ptr tower_item_type;
36 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
38 node * m_pDelChain; ///< Deleted node chain (local for a thread)
40 bool volatile m_bLinked;
41 bool volatile m_bUnlinked;
44 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
45 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
48 /// Constructs a node of height 1 (a bottom-list node)
51 , m_pDelChain( nullptr )
54 , m_bUnlinked( false )
57 , m_arrNext( nullptr )
63 assert( !m_bLinked || m_bUnlinked );
67 /// Constructs a node of height \p nHeight
68 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
70 assert( nHeight > 0 );
71 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
72 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
75 m_arrNext = nextTower;
79 atomic_marked_ptr * release_tower()
81 atomic_marked_ptr * pTower = m_arrNext;
87 atomic_marked_ptr * get_tower() const
94 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
95 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
98 /// Access to element of next pointer array
99 atomic_marked_ptr& next( unsigned int nLevel )
101 assert( nLevel < height() );
102 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
104 # ifdef CDS_THREAD_SANITIZER_ENABLED
105 // TSan false positive: m_arrNext is read-only array
106 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
107 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
108 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
111 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
115 /// Access to element of next pointer array (const version)
116 atomic_marked_ptr const& next( unsigned int nLevel ) const
118 assert( nLevel < height() );
119 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
121 # ifdef CDS_THREAD_SANITIZER_ENABLED
122 // TSan false positive: m_arrNext is read-only array
123 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
124 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
125 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
128 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
132 /// Access to element of next pointer array (same as \ref next function)
133 atomic_marked_ptr& operator[]( unsigned int nLevel )
135 return next( nLevel );
138 /// Access to element of next pointer array (same as \ref next function)
139 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
141 return next( nLevel );
144 /// Height of the node
145 unsigned int height() const
150 /// Clears internal links
153 assert( m_arrNext == nullptr );
154 m_pNext.store( marked_ptr(), atomics::memory_order_release );
155 m_pDelChain = nullptr;
158 bool is_cleared() const
160 return m_pNext == atomic_marked_ptr()
161 && m_arrNext == nullptr
165 } // namespace skip_list
169 namespace skip_list { namespace details {
171 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
172 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
175 typedef cds::urcu::gc< RCU > gc;
176 typedef NodeTraits node_traits;
177 typedef BackOff back_off;
178 typedef typename node_traits::node_type node_type;
179 typedef typename node_traits::value_type value_type;
180 static bool const c_isConst = IsConst;
182 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
185 typedef typename node_type::marked_ptr marked_ptr;
186 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
193 // RCU should be locked before iterating!!!
194 assert( gc::is_locked() );
199 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
200 // Current node is marked as deleted. So, its next pointer can point to anything
201 // In this case we interrupt our iteration and returns end() iterator.
206 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
207 node_type * pp = p.ptr();
209 // p is marked as deleted. Spin waiting for physical removal
213 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
214 // p is marked as deleted. Spin waiting for physical removal
224 public: // for internal use only!!!
225 iterator( node_type& refHead )
228 // RCU should be locked before iterating!!!
229 assert( gc::is_locked() );
234 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
240 node_type * pp = p.ptr();
241 // Logically deleted node is marked from highest level
242 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
255 // RCU should be locked before iterating!!!
256 assert( gc::is_locked() );
259 iterator( iterator const& s)
260 : m_pNode( s.m_pNode )
262 // RCU should be locked before iterating!!!
263 assert( gc::is_locked() );
266 value_type * operator ->() const
268 assert( m_pNode != nullptr );
269 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
271 return node_traits::to_value_ptr( m_pNode );
274 value_ref operator *() const
276 assert( m_pNode != nullptr );
277 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
279 return *node_traits::to_value_ptr( m_pNode );
283 iterator& operator ++()
289 iterator& operator = (const iterator& src)
291 m_pNode = src.m_pNode;
295 template <typename Bkoff, bool C>
296 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
298 return m_pNode == i.m_pNode;
300 template <typename Bkoff, bool C>
301 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
303 return !( *this == i );
306 }} // namespace skip_list::details
309 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
310 /** @ingroup cds_intrusive_map
311 @anchor cds_intrusive_SkipListSet_rcu
313 The implementation of well-known probabilistic data structure called skip-list
314 invented by W.Pugh in his papers:
315 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
316 - [1990] W.Pugh A Skip List Cookbook
318 A skip-list is a probabilistic data structure that provides expected logarithmic
319 time search without the need of rebalance. The skip-list is a collection of sorted
320 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
321 Each list has a level, ranging from 0 to 32. The bottom-level list contains
322 all the nodes, and each higher-level list is a sublist of the lower-level lists.
323 Each node is created with a random top level (with a random height), and belongs
324 to all lists up to that level. The probability that a node has the height 1 is 1/2.
325 The probability that a node has the height N is 1/2 ** N (more precisely,
326 the distribution depends on an random generator provided, but our generators
329 The lock-free variant of skip-list is implemented according to book
330 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
331 chapter 14.4 "A Lock-Free Concurrent Skiplist".
333 <b>Template arguments</b>:
334 - \p RCU - one of \ref cds_urcu_gc "RCU type"
335 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
336 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
337 - \p Traits - set traits, default is \p skip_list::traits
338 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
339 instead of \p Traits template argument.
341 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
342 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
346 The class supports a forward iterator (\ref iterator and \ref const_iterator).
347 The iteration is ordered.
349 You may iterate over skip-list set items only under RCU lock.
350 Only in this case the iterator is thread-safe since
351 while RCU is locked any set's item cannot be reclaimed.
353 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
354 (i.e. inserting, erasing and so on) is not possible.
356 @warning The iterator object cannot be passed between threads.
358 Example how to use skip-list set iterators:
360 // First, you should include the header for RCU type you have chosen
361 #include <cds/urcu/general_buffered.h>
362 #include <cds/intrusive/skip_list_rcu.h>
364 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
370 // Traits for your skip-list.
371 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
372 struct my_traits: public cds::intrusive::skip_list::traits
376 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
378 my_skiplist_set theSet;
384 // Apply RCU locking manually
385 typename rcu_type::scoped_lock sl;
387 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
391 // rcu_type::scoped_lock destructor releases RCU lock implicitly
395 The iterator class supports the following minimalistic interface:
402 iterator( iterator const& s);
404 value_type * operator ->() const;
405 value_type& operator *() const;
408 iterator& operator ++();
411 iterator& operator = (const iterator& src);
413 bool operator ==(iterator const& i ) const;
414 bool operator !=(iterator const& i ) const;
417 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
421 You should incorporate skip_list::node into your struct \p T and provide
422 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
423 define a struct based on \p skip_list::traits.
425 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
427 // First, you should include the header for RCU type you have chosen
428 #include <cds/urcu/general_buffered.h>
430 // Include RCU skip-list specialization
431 #include <cds/intrusive/skip_list_rcu.h>
434 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
436 // Data stored in skip list
437 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
446 // my_data compare functor
448 int operator()( const my_data& d1, const my_data& d2 )
450 return d1.strKey.compare( d2.strKey );
453 int operator()( const my_data& d, const std::string& s )
455 return d.strKey.compare(s);
458 int operator()( const std::string& s, const my_data& d )
460 return s.compare( d.strKey );
465 struct my_traits: public cds::intrusive::skip_list::traits
467 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
468 typedef my_data_cmp compare;
471 // Declare skip-list set type
472 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
475 Equivalent option-based code:
477 #include <cds/urcu/general_buffered.h>
478 #include <cds/intrusive/skip_list_rcu.h>
480 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
489 // Declare option-based skip-list set
490 typedef cds::intrusive::SkipListSet< rcu_type
492 , typename cds::intrusive::skip_list::make_traits<
493 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
494 ,cds::intrusive::opt::compare< my_data_cmp >
503 #ifdef CDS_DOXYGEN_INVOKED
504 ,typename Traits = skip_list::traits
509 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
512 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
513 typedef T value_type; ///< type of value stored in the skip-list
514 typedef Traits traits; ///< Traits template parameter
516 typedef typename traits::hook hook; ///< hook type
517 typedef typename hook::node_type node_type; ///< node type
519 # ifdef CDS_DOXYGEN_INVOKED
520 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
522 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
525 typedef typename traits::disposer disposer; ///< disposer
526 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
528 typedef typename traits::item_counter item_counter; ///< Item counting policy used
529 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
530 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
531 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
532 typedef typename traits::back_off back_off; ///< Back-off strategy
533 typedef typename traits::stat stat; ///< internal statistics type
534 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
535 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
536 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
539 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
541 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
542 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
544 static unsigned int const c_nMaxHeight = std::conditional<
545 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
546 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
547 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
551 static unsigned int const c_nMinHeight = 5;
555 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
556 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
560 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
562 typedef typename std::conditional<
563 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
564 ,intrusive_node_builder
565 ,typename traits::internal_node_builder
566 >::type node_builder;
568 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
570 static void dispose_node( value_type * pVal )
574 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
580 void operator()( value_type * pVal )
582 dispose_node( pVal );
586 static void dispose_chain( node_type * pChain )
589 assert( !gc::is_locked() );
591 auto f = [&pChain]() -> cds::urcu::retired_ptr {
592 node_type * p = pChain;
594 pChain = p->m_pDelChain;
595 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
597 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
599 gc::batch_retire(std::ref(f));
604 node_type * pPrev[ c_nMaxHeight ];
605 node_type * pSucc[ c_nMaxHeight ];
606 node_type * pNext[ c_nMaxHeight ];
609 node_type * pDelChain;
612 : pDelChain( nullptr )
617 dispose_chain( pDelChain );
620 void dispose( node_type * p )
622 assert( p != nullptr );
623 assert( p->m_pDelChain == nullptr );
625 p->m_pDelChain = pDelChain;
630 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
634 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
636 item_counter m_ItemCounter; ///< item counter
637 random_level_generator m_RandomLevelGen; ///< random level generator instance
638 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
639 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
640 mutable stat m_Stat; ///< internal statistics
644 unsigned int random_level()
646 // Random generator produces a number from range [0..31]
647 // We need a number from range [1..32]
648 return m_RandomLevelGen() + 1;
651 template <typename Q>
652 node_type * build_node( Q v )
654 return node_builder::make_tower( v, m_RandomLevelGen );
659 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
663 struct chain_disposer {
664 void operator()( node_type * pChain ) const
666 dispose_chain( pChain );
669 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
673 /// Result of \p get(), \p get_with() functions - pointer to the node found
674 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
679 bool is_extracted( marked_node_ptr const p ) const
681 return (p.bits() & 2) != 0;
684 template <typename Q, typename Compare >
685 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
687 assert( gc::is_locked() );
690 marked_node_ptr pSucc;
691 marked_node_ptr pCur;
695 pPred = m_Head.head();
697 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
700 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
702 // pCur.bits() means that pPred is logically deleted
706 if ( pCur.ptr() == nullptr ) {
707 // end of the list at level nLevel - goto next level
711 // pSucc contains deletion mark for pCur
712 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
714 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
717 if ( pSucc.bits() ) {
718 // pCur is marked, i.e. logically deleted.
719 marked_node_ptr p( pCur.ptr() );
720 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
721 memory_model::memory_order_release, atomics::memory_order_relaxed ))
725 pCur->m_bUnlinked = true;
728 if ( !is_extracted( pSucc )) {
729 // We cannot free the node at this moment since RCU is locked
730 // Link deleted nodes to a chain to free later
731 pos.dispose( pCur.ptr() );
732 m_Stat.onEraseWhileFind();
735 m_Stat.onExtractWhileFind();
742 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
745 else if ( nCmp == 0 && bStopIfFound )
753 pos.pPrev[ nLevel ] = pPred;
754 pos.pSucc[ nLevel ] = pCur.ptr();
761 pos.pCur = pCur.ptr();
762 return pCur.ptr() && nCmp == 0;
765 bool find_min_position( position& pos )
767 assert( gc::is_locked() );
770 marked_node_ptr pSucc;
771 marked_node_ptr pCur;
774 pPred = m_Head.head();
776 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
778 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
779 // pCur.bits() means that pPred is logically deleted
780 // head cannot be deleted
781 assert( pCur.bits() == 0 );
785 // pSucc contains deletion mark for pCur
786 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
788 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
791 if ( pSucc.bits() ) {
792 // pCur is marked, i.e. logically deleted.
793 marked_node_ptr p( pCur.ptr() );
794 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
795 memory_model::memory_order_release, atomics::memory_order_relaxed ))
799 pCur->m_bUnlinked = true;
802 if ( !is_extracted( pSucc )) {
803 // We cannot free the node at this moment since RCU is locked
804 // Link deleted nodes to a chain to free later
805 pos.dispose( pCur.ptr() );
806 m_Stat.onEraseWhileFind();
809 m_Stat.onExtractWhileFind();
818 pos.pPrev[ nLevel ] = pPred;
819 pos.pSucc[ nLevel ] = pCur.ptr();
821 return (pos.pCur = pCur.ptr()) != nullptr;
824 bool find_max_position( position& pos )
826 assert( gc::is_locked() );
829 marked_node_ptr pSucc;
830 marked_node_ptr pCur;
833 pPred = m_Head.head();
835 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
838 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
840 // pCur.bits() means that pPred is logically deleted
844 if ( pCur.ptr() == nullptr ) {
845 // end of the list at level nLevel - goto next level
849 // pSucc contains deletion mark for pCur
850 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
852 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
855 if ( pSucc.bits() ) {
856 // pCur is marked, i.e. logically deleted.
857 marked_node_ptr p( pCur.ptr() );
858 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
859 memory_model::memory_order_release, atomics::memory_order_relaxed ))
863 pCur->m_bUnlinked = true;
866 if ( !is_extracted( pSucc )) {
867 // We cannot free the node at this moment since RCU is locked
868 // Link deleted nodes to a chain to free later
869 pos.dispose( pCur.ptr() );
870 m_Stat.onEraseWhileFind();
873 m_Stat.onExtractWhileFind();
888 pos.pPrev[ nLevel ] = pPred;
889 pos.pSucc[ nLevel ] = pCur.ptr();
892 return (pos.pCur = pCur.ptr()) != nullptr;
895 template <typename Func>
896 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
898 assert( gc::is_locked() );
900 unsigned int nHeight = pNode->height();
901 pNode->clear_tower();
904 marked_node_ptr p( pos.pSucc[0] );
905 pNode->next( 0 ).store( p, memory_model::memory_order_release );
906 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
910 pNode->m_bLinked = true;
915 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
918 marked_node_ptr q( pos.pSucc[ nLevel ]);
919 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
920 // pNode has been marked as removed while we are inserting it
923 m_Stat.onLogicDeleteWhileInsert();
927 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
930 // Renew insert position
931 m_Stat.onRenewInsertPosition();
932 if ( !find_position( val, pos, key_comparator(), false )) {
933 // The node has been deleted while we are inserting it
934 m_Stat.onNotFoundWhileInsert();
942 template <typename Func>
943 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
945 assert( pDel != nullptr );
946 assert( gc::is_locked() );
948 marked_node_ptr pSucc;
950 // logical deletion (marking)
951 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
952 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
955 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
962 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
967 int const nMask = bExtract ? 3 : 1;
968 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
970 f( *node_traits::to_value_ptr( pDel ));
975 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
976 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
977 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
978 memory_model::memory_order_release, atomics::memory_order_relaxed) )
981 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
983 m_Stat.onSlowExtract();
985 m_Stat.onSlowErase();
987 assert( pDel->m_bUnlinked );
994 pDel->m_bUnlinked = true;
997 // We cannot free the node at this moment since RCU is locked
998 // Link deleted nodes to a chain to free later
1000 m_Stat.onFastErase();
1003 m_Stat.onFastExtract();
1006 m_Stat.onEraseRetry();
1010 enum finsd_fastpath_result {
1011 find_fastpath_found,
1012 find_fastpath_not_found,
1015 template <typename Q, typename Compare, typename Func>
1016 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1019 marked_node_ptr pCur;
1020 marked_node_ptr pSucc;
1021 marked_node_ptr pNull;
1025 pPred = m_Head.head();
1026 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1027 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1028 if ( pCur == pNull )
1031 while ( pCur != pNull ) {
1032 if ( pCur.bits() ) {
1033 // Wait until pCur is removed
1034 unsigned int nAttempt = 0;
1035 while ( pCur.bits() && nAttempt++ < 16 ) {
1037 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1041 if ( pCur.bits() ) {
1042 // Maybe, we are on deleted node sequence
1043 // Abort searching, try slow-path
1044 return find_fastpath_abort;
1049 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1052 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1054 else if ( nCmp == 0 ) {
1056 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1057 return find_fastpath_found;
1059 else // pCur > val - go down
1065 return find_fastpath_not_found;
1068 template <typename Q, typename Compare, typename Func>
1069 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1071 if ( find_position( val, pos, cmp, true )) {
1072 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1074 f( *node_traits::to_value_ptr( pos.pCur ), val );
1081 template <typename Q, typename Compare, typename Func>
1082 bool do_find_with( Q& val, Compare cmp, Func f )
1085 return do_find_with( val, cmp, f, pos );
1088 template <typename Q, typename Compare, typename Func>
1089 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1096 switch ( find_fastpath( val, cmp, f )) {
1097 case find_fastpath_found:
1098 m_Stat.onFindFastSuccess();
1100 case find_fastpath_not_found:
1101 m_Stat.onFindFastFailed();
1107 if ( find_slowpath( val, cmp, f, pos )) {
1108 m_Stat.onFindSlowSuccess();
1112 m_Stat.onFindSlowFailed();
1119 template <typename Q, typename Compare, typename Func>
1120 bool do_erase( Q const& val, Compare cmp, Func f )
1122 check_deadlock_policy::check();
1130 if ( !find_position( val, pos, cmp, false ) ) {
1131 m_Stat.onEraseFailed();
1135 node_type * pDel = pos.pCur;
1136 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1138 unsigned int nHeight = pDel->height();
1139 if ( try_remove_at( pDel, pos, f, false )) {
1141 m_Stat.onRemoveNode( nHeight );
1142 m_Stat.onEraseSuccess();
1146 m_Stat.onEraseFailed();
1155 template <typename Q, typename Compare>
1156 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1158 // RCU should be locked!!!
1159 assert( gc::is_locked() );
1163 if ( !find_position( key, pos, cmp, false ) ) {
1164 m_Stat.onExtractFailed();
1169 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1171 unsigned int const nHeight = pDel->height();
1173 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1175 m_Stat.onRemoveNode( nHeight );
1176 m_Stat.onExtractSuccess();
1179 m_Stat.onExtractFailed();
1184 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1187 template <typename Q>
1188 value_type * do_extract( Q const& key )
1190 check_deadlock_policy::check();
1191 value_type * pDel = nullptr;
1195 pDel = do_extract_key( key, key_comparator(), pos );
1201 template <typename Q, typename Less>
1202 value_type * do_extract_with( Q const& key, Less pred )
1205 check_deadlock_policy::check();
1206 value_type * pDel = nullptr;
1210 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
1216 value_type * do_extract_min()
1218 assert( !gc::is_locked() );
1226 if ( !find_min_position( pos ) ) {
1227 m_Stat.onExtractMinFailed();
1232 unsigned int const nHeight = pDel->height();
1234 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1236 m_Stat.onRemoveNode( nHeight );
1237 m_Stat.onExtractMinSuccess();
1240 m_Stat.onExtractMinFailed();
1246 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1249 value_type * do_extract_max()
1251 assert( !gc::is_locked() );
1259 if ( !find_max_position( pos ) ) {
1260 m_Stat.onExtractMaxFailed();
1265 unsigned int const nHeight = pDel->height();
1267 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1269 m_Stat.onRemoveNode( nHeight );
1270 m_Stat.onExtractMaxSuccess();
1273 m_Stat.onExtractMaxFailed();
1279 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1282 void increase_height( unsigned int nHeight )
1284 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1285 if ( nCur < nHeight )
1286 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
1291 /// Default constructor
1293 : m_Head( c_nMaxHeight )
1294 , m_nHeight( c_nMinHeight )
1295 , m_pDeferredDelChain( nullptr )
1297 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1299 // Barrier for head node
1300 atomics::atomic_thread_fence( memory_model::memory_order_release );
1303 /// Clears and destructs the skip-list
1311 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1313 /// Const iterator type
1314 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1316 /// Returns a forward iterator addressing the first element in a set
1319 return iterator( *m_Head.head() );
1322 /// Returns a forward const iterator addressing the first element in a set
1323 const_iterator begin() const
1325 return const_iterator( *m_Head.head() );
1328 /// Returns a forward const iterator addressing the first element in a set
1329 const_iterator cbegin() const
1331 return const_iterator( *m_Head.head() );
1334 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
1340 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1341 const_iterator end() const
1343 return const_iterator();
1346 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1347 const_iterator cend() const
1349 return const_iterator();
1353 /// Inserts new node
1355 The function inserts \p val in the set if it does not contain
1356 an item with key equal to \p val.
1358 The function applies RCU lock internally.
1360 Returns \p true if \p val is placed into the set, \p false otherwise.
1362 bool insert( value_type& val )
1364 return insert( val, []( value_type& ) {} );
// NOTE(review): mangled extract — gaps in the embedded original line numbers
// (1389 -> 1395, 1399 -> 1405, ...) show that the RCU lock scope, the retry
// loop and the return statements of this method are missing from this view,
// so the code below is reproduced verbatim rather than restructured.
1367 /// Inserts new node
1369 This function is intended for derived non-intrusive containers.
1371 The function allows to split creating of new item into two parts:
1372 - create item with key only
1373 - insert new item into the set
1374 - if inserting is success, calls \p f functor to initialize value-field of \p val.
1376 The functor signature is:
1378 void func( value_type& val );
1380 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1381 \p val no other changes could be made on this set's item by concurrent threads.
1382 The user-defined functor is called only if the inserting is success.
1384 RCU \p synchronize method can be called. RCU should not be locked.
1386 template <typename Func>
1387 bool insert( value_type& val, Func f )
// Deadlock check: RCU must not be locked by the caller because synchronize()
// may be invoked internally.
1389 check_deadlock_policy::check();
// Convert the value to its intrusive node and guard it so the node tower is
// released if the node is not linked in (see scoped_node_ptr).
1395 node_type * pNode = node_traits::to_node_ptr( val );
1396 scoped_node_ptr scp( pNode );
1397 unsigned int nHeight = pNode->height();
1398 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1399 bool bTowerMade = false;
// NOTE(review): the meaning of the last find_position() argument is not
// visible in this view — confirm against the helper's declaration.
1405 bool bFound = find_position( val, pos, key_comparator(), true );
1407 // scoped_node_ptr deletes the node tower if we create it
1411 m_Stat.onInsertFailed();
// Build the node tower lazily when the caller did not pre-build it.
1417 build_node( pNode );
1418 nHeight = pNode->height();
// On CAS contention the whole search/insert is retried.
1423 if ( !insert_at_position( val, pNode, pos, f )) {
1424 m_Stat.onInsertRetry();
1428 increase_height( nHeight );
1430 m_Stat.onAddNode( nHeight );
1431 m_Stat.onInsertSuccess();
// NOTE(review): mangled extract — gaps in the embedded original line numbers
// (1474 -> 1477, 1496 -> 1507, ...) show that the RCU lock scope, the retry
// loop, the item-counter update and the return statements are missing from
// this view, so the code below is reproduced verbatim rather than rewritten.
1441 /// Updates the node
1443 The operation performs inserting or changing data in a lock-free manner.
1445 If the item \p val is not found in the set, then \p val is inserted into the set
1446 iff \p bInsert is \p true.
1447 Otherwise, the functor \p func is called with item found.
1448 The functor signature is:
1450 void func( bool bNew, value_type& item, value_type& val );
1453 - \p bNew - \p true if the item has been inserted, \p false otherwise
1454 - \p item - item of the set
1455 - \p val - argument \p val passed into the \p %update() function
1456 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1457 refer to the same thing.
1459 The functor can change non-key fields of the \p item; however, \p func must guarantee
1460 that during changing no other modifications could be made on this item by concurrent threads.
1462 RCU \p synchronize method can be called. RCU should not be locked.
1464 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
1465 i.e. the node has been inserted or updated,
1466 \p second is \p true if new item has been added or \p false if the item with \p key
1469 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1471 template <typename Func>
1472 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
// Deadlock check: RCU must not be locked by the caller because synchronize()
// may be invoked internally.
1474 check_deadlock_policy::check();
// Result pair: (success, inserted-new).
1477 std::pair<bool, bool> bRet( true, false );
// Convert the value to its intrusive node; scoped_node_ptr frees the node
// tower if the node is not linked in.
1480 node_type * pNode = node_traits::to_node_ptr( val );
1481 scoped_node_ptr scp( pNode );
1482 unsigned int nHeight = pNode->height();
1483 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1484 bool bTowerMade = false;
1489 bool bFound = find_position( val, pos, key_comparator(), true );
1491 // scoped_node_ptr deletes the node tower if we create it before
// Existing item: report it to the user functor with bNew == false.
1495 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1496 m_Stat.onUpdateExist();
// Build the node tower lazily when the caller did not pre-build it.
1507 build_node( pNode );
1508 nHeight = pNode->height();
// New item: the functor sees bNew == true and item == val (same object).
1513 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1514 m_Stat.onInsertRetry();
1518 increase_height( nHeight );
1521 m_Stat.onAddNode( nHeight );
1522 m_Stat.onUpdateNew();
1531 template <typename Func>
1532 CDS_DEPRECATED("ensure() is deprecated, use update()")
1533 std::pair<bool, bool> ensure( value_type& val, Func func )
1535 return update( val, func, true );
// NOTE(review): mangled extract — gaps in the embedded original line numbers
// (1558 -> 1566, 1579 -> 1583, ...) show that the RCU lock scope, the
// item-counter decrement and the return statements are missing from this
// view, so the code below is reproduced verbatim rather than rewritten.
1539 /// Unlinks the item \p val from the set
1541 The function searches the item \p val in the set and unlinks it from the set
1542 if it is found and is equal to \p val.
1544 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1545 and deletes the item found. \p %unlink() searches an item by key and deletes it
1546 only if \p val is an item of that set, i.e. the pointer to item found
1547 is equal to <tt> &val </tt>.
1549 RCU \p synchronize method can be called. RCU should not be locked.
1551 The \ref disposer specified in \p Traits class template parameter is called
1552 by garbage collector \p GC asynchronously.
1554 The function returns \p true on success and \p false otherwise.
1556 bool unlink( value_type& val )
// Deadlock check: RCU must not be locked by the caller because synchronize()
// may be invoked internally.
1558 check_deadlock_policy::check();
// Key not present at all — nothing to unlink.
1566 if ( !find_position( val, pos, key_comparator(), false ) ) {
1567 m_Stat.onUnlinkFailed();
1571 node_type * pDel = pos.pCur;
// The node found must compare equal to the key of val.
1572 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1574 unsigned int nHeight = pDel->height();
// Unlink only if the found node is exactly &val (pointer identity, not just
// key equality) and physical removal succeeds.
1576 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1578 m_Stat.onRemoveNode( nHeight );
1579 m_Stat.onUnlinkSuccess();
1583 m_Stat.onUnlinkFailed();
1592 /// Extracts the item from the set with specified \p key
1593 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1594 The function searches an item with key equal to \p key in the set,
1595 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1596 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1598 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1600 RCU \p synchronize method can be called. RCU should NOT be locked.
1601 The function does not call the disposer for the item found.
1602 The disposer will be implicitly invoked when the returned object is destroyed or when
1603 its \p release() member function is called.
1606 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1610 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1615 // Dispose returned item.
1620 template <typename Q>
1621 exempt_ptr extract( Q const& key )
1623 return exempt_ptr( do_extract( key ));
1626 /// Extracts the item from the set with comparing functor \p pred
1628 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1629 \p Less has the interface like \p std::less.
1630 \p pred must imply the same element order as the comparator used for building the set.
1632 template <typename Q, typename Less>
1633 exempt_ptr extract_with( Q const& key, Less pred )
1635 return exempt_ptr( do_extract_with( key, pred ));
1638 /// Extracts an item with minimal key from the list
1640 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1641 If the skip-list is empty the function returns an empty \p exempt_ptr.
1643 RCU \p synchronize method can be called. RCU should NOT be locked.
1644 The function does not call the disposer for the item found.
1645 The disposer will be implicitly invoked when the returned object is destroyed or when
1646 its \p release() member function is manually called.
1649 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1653 typename skip_list::exempt_ptr ep(theList.extract_min());
1658 // Dispose returned item.
1663 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1664 It means that the function gets leftmost item and tries to unlink it.
1665 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1666 So, the function returns the item with minimum key at the moment of list traversing.
1668 exempt_ptr extract_min()
1670 return exempt_ptr( do_extract_min());
1673 /// Extracts an item with maximal key from the list
1675 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1676 If the skip-list is empty the function returns an empty \p exempt_ptr.
1678 RCU \p synchronize method can be called. RCU should NOT be locked.
1679 The function does not call the disposer for the item found.
1680 The disposer will be implicitly invoked when the returned object is destroyed or when
1681 its \p release() member function is manually called.
1684 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1688 typename skip_list::exempt_ptr ep( theList.extract_max() );
1692 // Dispose returned item.
1697 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1698 It means that the function gets rightmost item and tries to unlink it.
1699 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1700 So, the function returns the item with maximum key at the moment of list traversing.
1702 exempt_ptr extract_max()
1704 return exempt_ptr( do_extract_max());
1707 /// Deletes the item from the set
1708 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1709 The function searches an item with key equal to \p key in the set,
1710 unlinks it from the set, and returns \p true.
1711 If the item with key equal to \p key is not found the function return \p false.
1713 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1715 RCU \p synchronize method can be called. RCU should not be locked.
1717 template <typename Q>
1718 bool erase( const Q& key )
1720 return do_erase( key, key_comparator(), [](value_type const&) {} );
1723 /// Delete the item from the set with comparing functor \p pred
1725 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1726 but \p pred predicate is used for key comparing.
1727 \p Less has the interface like \p std::less.
1728 \p pred must imply the same element order as the comparator used for building the set.
1730 template <typename Q, typename Less>
1731 bool erase_with( const Q& key, Less pred )
1734 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1737 /// Deletes the item from the set
1738 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1739 The function searches an item with key equal to \p key in the set,
1740 call \p f functor with item found, unlinks it from the set, and returns \p true.
1741 The \ref disposer specified in \p Traits class template parameter is called
1742 by garbage collector \p GC asynchronously.
1744 The \p Func interface is
1747 void operator()( value_type const& item );
1750 If the item with key equal to \p key is not found the function return \p false.
1752 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1754 RCU \p synchronize method can be called. RCU should not be locked.
1756 template <typename Q, typename Func>
1757 bool erase( Q const& key, Func f )
1759 return do_erase( key, key_comparator(), f );
1762 /// Delete the item from the set with comparing functor \p pred
1764 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1765 but \p pred predicate is used for key comparing.
1766 \p Less has the interface like \p std::less.
1767 \p pred must imply the same element order as the comparator used for building the set.
1769 template <typename Q, typename Less, typename Func>
1770 bool erase_with( Q const& key, Less pred, Func f )
1773 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1777 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1778 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1779 The interface of \p Func functor is:
1782 void operator()( value_type& item, Q& key );
1785 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1787 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1788 that \p item cannot be disposed during functor is executing.
1789 The functor does not serialize simultaneous access to the set \p item. If such access is
1790 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1792 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1793 can modify both arguments.
1795 The function applies RCU lock internally.
1797 The function returns \p true if \p key is found, \p false otherwise.
1799 template <typename Q, typename Func>
1800 bool find( Q& key, Func f )
1802 return do_find_with( key, key_comparator(), f );
1805 template <typename Q, typename Func>
1806 bool find( Q const& key, Func f )
1808 return do_find_with( key, key_comparator(), f );
1812 /// Finds the key \p key with comparing functor \p pred
1814 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1815 but \p cmp is used for key comparison.
1816 \p Less functor has the interface like \p std::less.
1817 \p cmp must imply the same element order as the comparator used for building the set.
1819 template <typename Q, typename Less, typename Func>
1820 bool find_with( Q& key, Less pred, Func f )
1823 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1826 template <typename Q, typename Less, typename Func>
1827 bool find_with( Q const& key, Less pred, Func f )
1830 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1834 /// Checks whether the set contains \p key
1836 The function searches the item with key equal to \p key
1837 and returns \p true if it is found, and \p false otherwise.
1839 The function applies RCU lock internally.
1841 template <typename Q>
1842 bool contains( Q const& key )
1844 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1847 template <typename Q>
1848 CDS_DEPRECATED("deprecated, use contains()")
1849 bool find( Q const& key )
1851 return contains( key );
1855 /// Checks whether the set contains \p key using \p pred predicate for searching
1857 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1858 \p Less functor has the interface like \p std::less.
1859 \p Less must imply the same element order as the comparator used for building the set.
1861 template <typename Q, typename Less>
1862 bool contains( Q const& key, Less pred )
1865 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1868 template <typename Q, typename Less>
1869 CDS_DEPRECATED("deprecated, use contains()")
1870 bool find_with( Q const& key, Less pred )
1872 return contains( key, pred );
1876 /// Finds \p key and return the item found
1877 /** \anchor cds_intrusive_SkipListSet_rcu_get
1878 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1879 If \p key is not found it returns empty \p raw_ptr.
1881 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1883 RCU should be locked before call of this function.
1884 Returned item is valid only while RCU is locked:
1886 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1889 typename skip_list::raw_ptr pVal;
1892 skip_list::rcu_lock lock;
1894 pVal = theList.get( 5 );
1900 // You can manually release pVal after RCU-locked section
1904 template <typename Q>
1905 raw_ptr get( Q const& key )
1907 assert( gc::is_locked());
1910 value_type * pFound;
1911 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1912 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1913 return raw_ptr( raw_ptr_disposer( pos ));
1916 /// Finds \p key and return the item found
1918 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1919 but \p pred is used for comparing the keys.
1921 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1923 \p pred must imply the same element order as the comparator used for building the set.
1925 template <typename Q, typename Less>
1926 raw_ptr get_with( Q const& key, Less pred )
1929 assert( gc::is_locked());
1931 value_type * pFound = nullptr;
1933 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1934 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1936 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1938 return raw_ptr( raw_ptr_disposer( pos ));
1941 /// Returns item count in the set
1943 The value returned depends on item counter type provided by \p Traits template parameter.
1944 For \p atomicity::empty_item_counter the function always returns 0.
1945 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1946 member function for this purpose.
1950 return m_ItemCounter;
1953 /// Checks if the set is empty
1956 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1959 /// Clears the set (not atomic)
1961 The function unlink all items from the set.
1962 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1966 assert( set.empty() );
1968 the assertion could be raised.
1970 For each item the \p disposer will be called automatically after unlinking.
1975 while ( (ep = extract_min()) );
1978 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1979 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
1981 return c_nMaxHeight;
1984 /// Returns const reference to internal statistics
1985 stat const& statistics() const
1991 }} // namespace cds::intrusive
1994 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H