3 #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H
4 #define __CDS_INTRUSIVE_SKIP_LIST_RCU_H
8 #include <cds/intrusive/skip_list_base.h>
9 #include <cds/opt/compare.h>
11 #include <cds/urcu/details/check_deadlock.h>
12 #include <cds/details/binary_functor_wrapper.h>
13 #include <cds/urcu/exempt_ptr.h>
16 namespace cds { namespace intrusive {
// Skip-list node specialized for an RCU-based garbage collector.
// Holds a marked next pointer for level 0 inline plus an external tower
// (m_arrNext) for the upper levels; mark bits encode deletion state.
21 template <class RCU, typename Tag>
22 class node< cds::urcu::gc< RCU >, Tag >
25 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
26 typedef Tag tag ; ///< tag
29 // bit 0 - the item is logically deleted
30 // bit 1 - the item is extracted (only for level 0)
31 typedef cds::details::marked_ptr<node, 3> marked_ptr ; ///< marked pointer
32 typedef CDS_ATOMIC::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer
33 typedef atomic_marked_ptr tower_item_type;
36 atomic_marked_ptr m_pNext ; ///< Next item in bottom-list (list at level 0)
38 node * m_pDelChain ; ///< Deleted node chain (local for a thread)
// Lifecycle flags checked by asserts: a node becomes "linked" when inserted
// and must be "unlinked" before it may be destroyed (see assert below).
40 bool volatile m_bLinked;
41 bool volatile m_bUnlinked;
44 unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
45 atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL
48 /// Constructs a node of height 1 (a bottom-list node)
51 , m_pDelChain( nullptr )
54 , m_bUnlinked( false )
57 , m_arrNext( nullptr )
// Invariant on destruction: a node that was linked must have been unlinked.
63 assert( !m_bLinked || m_bUnlinked );
67 /// Constructs a node of height \p nHeight
68 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
70 assert( nHeight > 0 );
71 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
72 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
75 m_arrNext = nextTower;
// Detaches and returns the upper-level tower so the caller can free it.
79 atomic_marked_ptr * release_tower()
81 atomic_marked_ptr * pTower = m_arrNext;
87 atomic_marked_ptr * get_tower() const
// Resets all upper-level next pointers to an unmarked null value.
94 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
95 next(nLevel).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
98 /// Access to element of next pointer array
99 atomic_marked_ptr& next( unsigned int nLevel )
101 assert( nLevel < height() );
102 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
// Level 0 lives inline in m_pNext; levels 1..height-1 are in m_arrNext.
104 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
107 /// Access to element of next pointer array (const version)
108 atomic_marked_ptr const& next( unsigned int nLevel ) const
110 assert( nLevel < height() );
// NOTE(review): '&&' binds tighter than '||', so this is equivalent to the
// parenthesized assert in the non-const overload (compilers may warn).
111 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
113 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
116 /// Access to element of next pointer array (same as \ref next function)
117 atomic_marked_ptr& operator[]( unsigned int nLevel )
119 return next( nLevel );
122 /// Access to element of next pointer array (same as \ref next function)
123 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
125 return next( nLevel );
128 /// Height of the node
129 unsigned int height() const
134 /// Clears internal links
137 assert( m_arrNext == nullptr );
138 m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
139 m_pDelChain = nullptr;
// True when the node carries no links and may be reused/freed.
142 bool is_cleared() const
144 return m_pNext == atomic_marked_ptr()
145 && m_arrNext == nullptr
149 } // namespace skip_list
153 namespace skip_list { namespace details {
// Forward iterator over the skip-list's bottom-level (level 0) list for the
// RCU specialization. Thread-safe only while the caller holds the RCU lock
// (asserted in every constructor and in increment).
155 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
156 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
159 typedef cds::urcu::gc< RCU > gc;
160 typedef NodeTraits node_traits;
161 typedef BackOff back_off;
162 typedef typename node_traits::node_type node_type;
163 typedef typename node_traits::value_type value_type;
164 static bool const c_isConst = IsConst;
166 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
169 typedef typename node_type::marked_ptr marked_ptr;
170 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
177 // RCU should be locked before iterating!!!
178 assert( gc::is_locked() );
// A node is logically deleted when its topmost-level next pointer is marked;
// in that case its next pointer may point anywhere, so iteration must stop.
183 if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
184 // Current node is marked as deleted. So, its next pointer can point to anything
185 // In this case we interrupt our iteration and returns end() iterator.
190 marked_ptr p = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed );
191 node_type * pp = p.ptr();
193 // p is marked as deleted. Spin waiting for physical removal
197 else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
198 // p is marked as deleted. Spin waiting for physical removal
208 public: // for internal use only!!!
// Constructs a begin() iterator: skips past logically-deleted nodes
// starting from the head tower.
209 iterator( node_type& refHead )
212 // RCU should be locked before iterating!!!
213 assert( gc::is_locked() );
218 marked_ptr p = refHead.next(0).load( CDS_ATOMIC::memory_order_relaxed );
224 node_type * pp = p.ptr();
225 // Logically deleted node is marked from highest level
226 if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
239 // RCU should be locked before iterating!!!
240 assert( gc::is_locked() );
243 iterator( iterator const& s)
244 : m_pNode( s.m_pNode )
246 // RCU should be locked before iterating!!!
247 assert( gc::is_locked() );
250 value_type * operator ->() const
252 assert( m_pNode != nullptr );
253 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
255 return node_traits::to_value_ptr( m_pNode );
258 value_ref operator *() const
260 assert( m_pNode != nullptr );
261 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
263 return *node_traits::to_value_ptr( m_pNode );
267 iterator& operator ++()
273 iterator& operator = (const iterator& src)
275 m_pNode = src.m_pNode;
// Comparison is by node pointer; valid across const/non-const and
// different back-off parameterizations.
279 template <typename Bkoff, bool C>
280 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
282 return m_pNode == i.m_pNode;
284 template <typename Bkoff, bool C>
285 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
287 return !( *this == i );
290 }} // namespace skip_list::details
293 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
294 /** @ingroup cds_intrusive_map
295 @anchor cds_intrusive_SkipListSet_rcu
297 The implementation of well-known probabilistic data structure called skip-list
298 invented by W.Pugh in his papers:
299 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
300 - [1990] W.Pugh A Skip List Cookbook
302 A skip-list is a probabilistic data structure that provides expected logarithmic
303 time search without the need of rebalance. The skip-list is a collection of sorted
304 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
305 Each list has a level, ranging from 0 to 32. The bottom-level list contains
306 all the nodes, and each higher-level list is a sublist of the lower-level lists.
307 Each node is created with a random top level (with a random height), and belongs
308 to all lists up to that level. The probability that a node has the height 1 is 1/2.
309 The probability that a node has the height N is 1/2 ** N (more precisely,
310 the distribution depends on a random generator provided, but our generators
313 The lock-free variant of skip-list is implemented according to book
314 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
315 chapter 14.4 "A Lock-Free Concurrent Skiplist".
316 \note The algorithm described in this book cannot be directly adapted for C++ (roughly speaking,
317 the algo contains a lot of bugs). The \b libcds implementation applies the approach discovered
318 by M.Michael in his \ref cds_intrusive_MichaelList_hp "lock-free linked list".
320 <b>Template arguments</b>:
321 - \p RCU - one of \ref cds_urcu_gc "RCU type"
322 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
323 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
324 - \p Traits - type traits. See \p skip_list::type_traits (the default) for explanation.
326 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction instead of \p Traits template
328 Template argument list \p Options of \p %cds::intrusive::skip_list::make_traits metafunction is:
329 - \p opt::hook - hook used. Possible values are: \p skip_list::base_hook, \p skip_list::member_hook, \p skip_list::traits_hook.
330 If the option is not specified, <tt>skip_list::base_hook<></tt> is used.
331 - \p opt::compare - key comparison functor. No default functor is provided.
332 If the option is not specified, the \p opt::less is used.
333 - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less<T>.
334 - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. Due the nature
335 of GC schema the disposer may be called asynchronously.
336 - \p opt::item_counter - the type of item counting feature. Default is \p atomicity::empty_item_counter that is no item counting.
337 - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
338 or \p opt::v::sequential_consistent (sequentially consistent memory model).
339 - \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xorshift, \p skip_list::turbo_pascal or
340 user-provided one. See \p skip_list::random_level_generator option description for explanation.
341 Default is \p %skip_list::turbo_pascal.
342 - \p opt::allocator - although the skip-list is an intrusive container,
343 an allocator should be provided to maintain variable randomly-calculated height of the node
344 since the node can contain up to 32 next pointers. The allocator option is used to allocate an array of next pointers
345 for nodes which height is more than 1. Default is \ref CDS_DEFAULT_ALLOCATOR.
346 - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
347 - \p opt::stat - internal statistics. Available types: \p skip_list::stat, \p skip_list::empty_stat (the default)
348 - \p opt::rcu_check_deadlock - a deadlock checking policy. Default is \p opt::v::rcu_throw_deadlock
350 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
351 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
355 The class supports a forward iterator (\ref iterator and \ref const_iterator).
356 The iteration is ordered.
358 You may iterate over skip-list set items only under RCU lock.
359 Only in this case the iterator is thread-safe since
360 while RCU is locked any set's item cannot be reclaimed.
362 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
363 (i.e. inserting, erasing and so on) is not possible.
365 @warning The iterator object cannot be passed between threads.
367 Example how to use skip-list set iterators:
369 // First, you should include the header for RCU type you have chosen
370 #include <cds/urcu/general_buffered.h>
371 #include <cds/intrusive/skip_list_rcu.h>
373 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
379 // Traits for your skip-list.
380 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
381 struct my_traits: public cds::intrusive::skip_list::type_traits
385 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
387 my_skiplist_set theSet;
393 // Apply RCU locking manually
394 typename rcu_type::scoped_lock sl;
396 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
400 // rcu_type::scoped_lock destructor releases RCU lock implicitly
404 The iterator class supports the following minimalistic interface:
411 iterator( iterator const& s);
413 value_type * operator ->() const;
414 value_type& operator *() const;
417 iterator& operator ++();
420 iterator& operator = (const iterator& src);
422 bool operator ==(iterator const& i ) const;
423 bool operator !=(iterator const& i ) const;
426 Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
430 You should incorporate skip_list::node into your struct \p T and provide
431 appropriate skip_list::type_traits::hook in your \p Traits template parameters. Usually, for \p Traits you
432 define a struct based on \p skip_list::type_traits.
434 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
436 // First, you should include the header for RCU type you have chosen
437 #include <cds/urcu/general_buffered.h>
439 // Include RCU skip-list specialization
440 #include <cds/intrusive/skip_list_rcu.h>
443 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
445 // Data stored in skip list
446 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
455 // my_data compare functor
457 int operator()( const my_data& d1, const my_data& d2 )
459 return d1.strKey.compare( d2.strKey );
462 int operator()( const my_data& d, const std::string& s )
464 return d.strKey.compare(s);
467 int operator()( const std::string& s, const my_data& d )
469 return s.compare( d.strKey );
474 // Declare type_traits
475 struct my_traits: public cds::intrusive::skip_list::type_traits
477 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
478 typedef my_data_cmp compare;
481 // Declare skip-list set type
482 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
485 Equivalent option-based code:
487 #include <cds/urcu/general_buffered.h>
488 #include <cds/intrusive/skip_list_rcu.h>
490 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
499 // Declare option-based skip-list set
500 typedef cds::intrusive::SkipListSet< rcu_type
502 , typename cds::intrusive::skip_list::make_traits<
503 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
504 ,cds::intrusive::opt::compare< my_data_cmp >
513 #ifdef CDS_DOXYGEN_INVOKED
514 ,typename Traits = skip_list::type_traits
519 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
522 typedef T value_type ; ///< type of value stored in the skip-list
523 typedef Traits options ; ///< Traits template parameter
525 typedef typename options::hook hook ; ///< hook type
526 typedef typename hook::node_type node_type ; ///< node type
528 # ifdef CDS_DOXYGEN_INVOKED
529 typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter.
531 typedef typename opt::details::make_comparator< value_type, options >::type key_comparator;
534 typedef typename options::disposer disposer ; ///< disposer used
535 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
537 typedef cds::urcu::gc< RCU > gc ; ///< Garbage collector
538 typedef typename options::item_counter item_counter; ///< Item counting policy used
539 typedef typename options::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
540 typedef typename options::random_level_generator random_level_generator ; ///< random level generator
541 typedef typename options::allocator allocator_type ; ///< allocator for maintaining array of next pointers of the node
542 typedef typename options::back_off back_off ; ///< Back-off strategy
543 typedef typename options::stat stat ; ///< internal statistics type
544 typedef typename options::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy
545 typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
546 static CDS_CONSTEXPR_CONST bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
549 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
551 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
552 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Clamp the generator's upper bound to the library-wide height limit at compile time.
554 static unsigned int const c_nMaxHeight = std::conditional<
555 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
556 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
557 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
561 static unsigned int const c_nMinHeight = 5;
565 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
566 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
570 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
// Use the user-supplied internal node builder if one is given, otherwise the default.
572 typedef typename std::conditional<
573 std::is_same< typename options::internal_node_builder, cds::opt::none >::value
574 ,intrusive_node_builder
575 ,typename options::internal_node_builder
576 >::type node_builder;
578 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Search position: per-level predecessor/successor snapshot produced by the
// find_xxx_position helpers, plus a chain (pDelChain) of nodes unlinked during
// the search that must be disposed after the RCU lock is released.
581 node_type * pPrev[ c_nMaxHeight ];
582 node_type * pSucc[ c_nMaxHeight ];
583 node_type * pNext[ c_nMaxHeight ];
586 node_type * pDelChain;
589 : pDelChain( nullptr )
594 assert( pDelChain == nullptr );
599 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
// Fallback functor objects for compilers without C++11 lambda support.
601 # ifndef CDS_CXX11_LAMBDA_SUPPORT
602 struct empty_insert_functor {
603 void operator()( value_type& )
607 struct empty_erase_functor {
608 void operator()( value_type const& )
612 struct empty_find_functor {
613 template <typename Q>
614 void operator()( value_type& item, Q& val )
621 template <typename Q>
622 void operator()( value_type& item, Q& val )
628 template <typename Func>
629 struct insert_at_ensure_functor {
631 insert_at_ensure_functor( Func f ) : m_func(f) {}
633 void operator()( value_type& item )
635 cds::unref( m_func)( true, item, item );
639 struct copy_value_functor {
640 template <typename Q>
641 void operator()( Q& dest, value_type const& src ) const
647 # endif // ifndef CDS_CXX11_LAMBDA_SUPPORT
652 skip_list::details::head_node< node_type > m_Head ; ///< head tower (max height)
654 item_counter m_ItemCounter ; ///< item counter
655 random_level_generator m_RandomLevelGen ; ///< random level generator instance
656 CDS_ATOMIC::atomic<unsigned int> m_nHeight ; ///< estimated high level
657 CDS_ATOMIC::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
658 mutable stat m_Stat ; ///< internal statistics
// Returns a random node height in [1 .. c_nMaxHeight].
662 unsigned int random_level()
664 // Random generator produces a number from range [0..31]
665 // We need a number from range [1..32]
666 return m_RandomLevelGen() + 1;
// Allocates a node tower of random height for value v via the node builder.
669 template <typename Q>
670 node_type * build_node( Q v )
672 return node_builder::make_tower( v, m_RandomLevelGen );
// Frees a retired value's node through the builder's disposer;
// used as the deleter for gc::batch_retire (see deferred_list_iterator).
675 static void dispose_node( value_type * pVal )
677 assert( pVal != NULL );
679 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
685 void operator()( value_type * pVal )
687 dispose_node( pVal );
693 typedef cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void > exempt_ptr ; ///< pointer to extracted node
// Bit 1 of the level-0 mark distinguishes "extracted" from plain "erased".
698 bool is_extracted( marked_node_ptr const p ) const
700 return (p.bits() & 2) != 0;
// Searches for val, filling pos with the per-level predecessors/successors.
// Returns true iff an item equal to val was found. Logically-deleted nodes
// met on the way are physically unlinked and chained into pos.pDelChain.
// Must be called under the RCU lock (asserted).
703 template <typename Q, typename Compare >
704 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
706 assert( gc::is_locked() );
709 marked_node_ptr pSucc;
710 marked_node_ptr pCur;
714 pPred = m_Head.head();
716 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
719 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
721 // pCur.bits() means that pPred is logically deleted
725 if ( pCur.ptr() == nullptr ) {
726 // end of the list at level nLevel - goto next level
730 // pSucc contains deletion mark for pCur
731 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Re-read pPred's link to verify pCur is still the successor (helps detect races).
733 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
736 if ( pSucc.bits() ) {
737 // pCur is marked, i.e. logically deleted.
// Attempt to physically unlink the marked node at this level.
738 marked_node_ptr p( pCur.ptr() );
739 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
740 memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
744 pCur->m_bUnlinked = true;
747 if ( !is_extracted( pSucc )) {
748 // We cannot free the node at this moment since RCU is locked
749 // Link deleted nodes to a chain to free later
750 link_for_remove( pos, pCur.ptr() );
751 m_Stat.onEraseWhileFind();
754 m_Stat.onExtractWhileFind();
761 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
764 else if ( nCmp == 0 && bStopIfFound )
772 pos.pPrev[ nLevel ] = pPred;
773 pos.pSucc[ nLevel ] = pCur.ptr();
780 pos.pCur = pCur.ptr();
781 return pCur.ptr() && nCmp == 0;
// Positions pos at the leftmost (minimal) item. Same unlink-while-searching
// behavior as find_position. Returns false when the list is empty.
784 bool find_min_position( position& pos )
786 assert( gc::is_locked() );
789 marked_node_ptr pSucc;
790 marked_node_ptr pCur;
793 pPred = m_Head.head();
795 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
797 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
798 // pCur.bits() means that pPred is logically deleted
799 // head cannot be deleted
800 assert( pCur.bits() == 0 );
804 // pSucc contains deletion mark for pCur
805 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
807 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
810 if ( pSucc.bits() ) {
811 // pCur is marked, i.e. logically deleted.
812 marked_node_ptr p( pCur.ptr() );
813 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
814 memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
818 pCur->m_bUnlinked = true;
821 if ( !is_extracted( pSucc )) {
822 // We cannot free the node at this moment since RCU is locked
823 // Link deleted nodes to a chain to free later
824 link_for_remove( pos, pCur.ptr() );
825 m_Stat.onEraseWhileFind();
828 m_Stat.onExtractWhileFind();
837 pos.pPrev[ nLevel ] = pPred;
838 pos.pSucc[ nLevel ] = pCur.ptr();
840 return (pos.pCur = pCur.ptr()) != nullptr;
// Positions pos at the rightmost (maximal) item; mirror of find_min_position.
843 bool find_max_position( position& pos )
845 assert( gc::is_locked() );
848 marked_node_ptr pSucc;
849 marked_node_ptr pCur;
852 pPred = m_Head.head();
854 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
857 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
859 // pCur.bits() means that pPred is logically deleted
863 if ( pCur.ptr() == nullptr ) {
864 // end of the list at level nLevel - goto next level
868 // pSucc contains deletion mark for pCur
869 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
871 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
874 if ( pSucc.bits() ) {
875 // pCur is marked, i.e. logically deleted.
876 marked_node_ptr p( pCur.ptr() );
877 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
878 memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
882 pCur->m_bUnlinked = true;
885 if ( !is_extracted( pSucc )) {
886 // We cannot free the node at this moment since RCU is locked
887 // Link deleted nodes to a chain to free later
888 link_for_remove( pos, pCur.ptr() );
889 m_Stat.onEraseWhileFind();
892 m_Stat.onExtractWhileFind();
907 pos.pPrev[ nLevel ] = pPred;
908 pos.pSucc[ nLevel ] = pCur.ptr();
911 return (pos.pCur = pCur.ptr()) != nullptr;
// Links pNode into the list at position pos. Level 0 is linked first with a
// CAS (making the node reachable), then f(val) is called, then the upper
// levels are linked bottom-up, re-running find_position when a CAS fails.
914 template <typename Func>
915 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
917 assert( gc::is_locked() );
919 unsigned int nHeight = pNode->height();
920 pNode->clear_tower();
923 marked_node_ptr p( pos.pSucc[0] );
924 pNode->next( 0 ).store( p, memory_model::memory_order_release );
925 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
929 pNode->m_bLinked = true;
931 cds::unref( f )( val );
934 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
937 marked_node_ptr q( pos.pSucc[ nLevel ]);
938 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
939 // pNode has been marked as removed while we are inserting it
942 m_Stat.onLogicDeleteWhileInsert();
946 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
949 // Renew insert position
950 m_Stat.onRenewInsertPosition();
951 if ( !find_position( val, pos, key_comparator(), false )) {
952 // The node has been deleted while we are inserting it
953 m_Stat.onNotFoundWhileInsert();
// Pushes pDel onto the position's local deleted-node chain for disposal
// after the RCU lock is released.
961 static void link_for_remove( position& pos, node_type * pDel )
963 assert( pDel->m_pDelChain == nullptr );
965 pDel->m_pDelChain = pos.pDelChain;
966 pos.pDelChain = pDel;
// Two-phase removal of pDel: first mark upper levels top-down, then CAS the
// level-0 mark (bit 0 for erase, bits 0|1 for extract - see nMask). The thread
// whose level-0 CAS succeeds owns the node: it calls f, physically unlinks the
// node at every level, and (for erase) chains it for deferred disposal.
969 template <typename Func>
970 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
972 assert( pDel != nullptr );
973 assert( gc::is_locked() );
975 marked_node_ptr pSucc;
977 // logical deletion (marking)
978 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
979 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
982 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
989 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
// Level-0 mask: 1 = logically deleted; 3 = deleted + extracted.
994 int const nMask = bExtract ? 3 : 1;
995 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
997 cds::unref(f)( *node_traits::to_value_ptr( pDel ));
// Physical unlinking: swing each predecessor past pDel, from top level down.
1002 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
1003 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
1004 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
1005 memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
// Slow path: a CAS failed, delegate the unlinking to find_position.
1008 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
1010 m_Stat.onSlowExtract();
1012 m_Stat.onSlowErase();
1014 assert( pDel->m_bUnlinked );
1021 pDel->m_bUnlinked = true;
1024 // We cannot free the node at this moment since RCU is locked
1025 // Link deleted nodes to a chain to free later
1026 link_for_remove( pos, pDel );
1027 m_Stat.onFastErase();
1030 m_Stat.onFastExtract();
// NOTE(review): 'finsd_fastpath_result' looks like a typo of
// 'find_fastpath_result'; renaming would touch every use site.
1037 enum finsd_fastpath_result {
1038 find_fastpath_found,
1039 find_fastpath_not_found,
// Wait-free-ish search that never unlinks nodes; aborts to the slow path when
// it gets stuck on a run of marked (deleted) nodes.
1042 template <typename Q, typename Compare, typename Func>
1043 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1046 marked_node_ptr pCur;
1047 marked_node_ptr pSucc;
1048 marked_node_ptr pNull;
1052 pPred = m_Head.head();
1053 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1054 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1055 if ( pCur == pNull )
1058 while ( pCur != pNull ) {
1059 if ( pCur.bits() ) {
1060 // Wait until pCur is removed
1061 unsigned int nAttempt = 0;
1062 while ( pCur.bits() && nAttempt++ < 16 ) {
1064 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1068 if ( pCur.bits() ) {
1069 // Maybe, we are on deleted node sequence
1070 // Abort searching, try slow-path
1071 return find_fastpath_abort;
1076 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1079 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1081 else if ( nCmp == 0 ) {
1083 cds::unref(f)( *node_traits::to_value_ptr( pCur.ptr() ), val );
1084 return find_fastpath_found;
1086 else // pCur > val - go down
1092 return find_fastpath_not_found;
// Full search via find_position; calls f(item, val) on a match.
1095 template <typename Q, typename Compare, typename Func>
1096 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1098 if ( find_position( val, pos, cmp, true )) {
1099 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1101 cds::unref(f)( *node_traits::to_value_ptr( pos.pCur ), val );
// Find driver: try the fast path first, fall back to the slow path on abort.
1108 template <typename Q, typename Compare, typename Func>
1109 bool do_find_with( Q& val, Compare cmp, Func f )
1116 switch ( find_fastpath( val, cmp, f )) {
1117 case find_fastpath_found:
1118 m_Stat.onFindFastSuccess();
1120 case find_fastpath_not_found:
1121 m_Stat.onFindFastFailed();
1127 if ( find_slowpath( val, cmp, f, pos )) {
1128 m_Stat.onFindSlowSuccess();
1132 m_Stat.onFindSlowFailed();
// Erase driver: locate the item, remove it via try_remove_at, then dispose
// the accumulated delete-chain outside the critical section.
1141 template <typename Q, typename Compare, typename Func>
1142 bool do_erase( Q const& val, Compare cmp, Func f )
1144 check_deadlock_policy::check();
1152 if ( !find_position( val, pos, cmp, false ) ) {
1153 m_Stat.onEraseFailed();
1157 node_type * pDel = pos.pCur;
1158 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1160 unsigned int nHeight = pDel->height();
1161 if ( try_remove_at( pDel, pos, f, false )) {
1163 m_Stat.onRemoveNode( nHeight );
1164 m_Stat.onEraseSuccess();
1168 m_Stat.onEraseFailed();
1174 dispose_chain( pos );
1178 template <typename Q, typename Compare>
1179 value_type * do_extract_key( Q const& key, Compare cmp )
1181 // RCU should be locked!!!
1182 assert( gc::is_locked() );
1187 if ( !find_position( key, pos, cmp, false ) ) {
1188 m_Stat.onExtractFailed();
1193 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1195 unsigned int const nHeight = pDel->height();
1197 if ( try_remove_at( pDel, pos,
1198 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1199 [](value_type const&) {}
1201 empty_erase_functor()
1206 m_Stat.onRemoveNode( nHeight );
1207 m_Stat.onExtractSuccess();
1210 m_Stat.onExtractFailed();
1216 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1219 template <typename ExemptPtr, typename Q>
1220 bool do_extract( ExemptPtr& result, Q const& key )
1222 check_deadlock_policy::check();
1227 value_type * pDel = do_extract_key( key, key_comparator() );
1228 bReturn = pDel != nullptr;
1237 template <typename ExemptPtr, typename Q, typename Less>
1238 bool do_extract_with( ExemptPtr& result, Q const& key, Less pred )
1240 check_deadlock_policy::check();
1245 value_type * pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>() );
1246 bReturn = pDel != nullptr;
1255 node_type * do_extract_min()
1257 assert( gc::is_locked() );
1262 if ( !find_min_position( pos ) ) {
1263 m_Stat.onExtractMinFailed();
1268 unsigned int const nHeight = pDel->height();
1270 if ( try_remove_at( pDel, pos,
1271 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1272 [](value_type const&) {}
1274 empty_erase_functor()
1279 m_Stat.onRemoveNode( nHeight );
1280 m_Stat.onExtractMinSuccess();
1283 m_Stat.onExtractMinFailed();
1292 template <typename ExemptPtr>
1293 bool do_extract_min( ExemptPtr& result )
1295 check_deadlock_policy::check();
1300 node_type * pDel = do_extract_min();
1301 bReturn = pDel != nullptr;
1303 result = node_traits::to_value_ptr(pDel);
1310 node_type * do_extract_max()
1312 assert( gc::is_locked() );
1317 if ( !find_max_position( pos ) ) {
1318 m_Stat.onExtractMaxFailed();
1323 unsigned int const nHeight = pDel->height();
1325 if ( try_remove_at( pDel, pos,
1326 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1327 [](value_type const&) {}
1329 empty_erase_functor()
1334 m_Stat.onRemoveNode( nHeight );
1335 m_Stat.onExtractMaxSuccess();
1338 m_Stat.onExtractMaxFailed();
1347 template <typename ExemptPtr>
1348 bool do_extract_max( ExemptPtr& result )
1350 check_deadlock_policy::check();
1355 node_type * pDel = do_extract_max();
1356 bReturn = pDel != nullptr;
1358 result = node_traits::to_value_ptr(pDel);
1365 void increase_height( unsigned int nHeight )
1367 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1368 if ( nCur < nHeight )
1369 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
// Forward iterator over a chain of logically-deleted nodes linked through
// node_type::m_pDelChain; yields cds::urcu::retired_ptr items suitable for
// gc::batch_retire. A default-constructed instance is the end sentinel.
// NOTE(review): elided here — the pCur member declaration, the constructor
// bodies and the operator++ signature are not visible in this chunk.
1372 class deferred_list_iterator
1376 explicit deferred_list_iterator( node_type * p )
1379 deferred_list_iterator()
// Wraps the current node and its disposer into a retired_ptr.
1383 cds::urcu::retired_ptr operator *() const
1385 return cds::urcu::retired_ptr( node_traits::to_value_ptr(pCur), dispose_node );
// Advance along the per-thread deletion chain.
1390 pCur = pCur->m_pDelChain;
1393 bool operator ==( deferred_list_iterator const& i ) const
1395 return pCur == i.pCur;
1397 bool operator !=( deferred_list_iterator const& i ) const
1399 return !operator ==( i );
1403 void dispose_chain( node_type * pHead )
1405 // RCU should NOT be locked
1406 check_deadlock_policy::check();
1408 gc::batch_retire( deferred_list_iterator( pHead ), deferred_list_iterator() );
// Retires the deletion chain accumulated in \p pos, then the shared
// deferred chain. RCU must NOT be locked (checked below).
// NOTE(review): elided here — the closing brace of the if and the call
// that disposes the deferred chain (presumably dispose_deferred();
// confirm against the full source) are not visible in this chunk.
1411 void dispose_chain( position& pos )
1413 // RCU should NOT be locked
1414 check_deadlock_policy::check();
1416 // Delete local chain
1417 if ( pos.pDelChain ) {
1418 dispose_chain( pos.pDelChain );
1419 pos.pDelChain = nullptr;
1422 // Delete deferred chain
1426 void dispose_deferred()
1428 dispose_chain( m_pDeferredDelChain.exchange( nullptr, memory_model::memory_order_acq_rel ) );
1431 void defer_chain( position& pos )
1433 if ( pos.pDelChain ) {
1434 node_type * pHead = pos.pDelChain;
1435 node_type * pTail = pHead;
1436 while ( pTail->m_pDelChain )
1437 pTail = pTail->m_pDelChain;
1439 node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
1441 pTail->m_pDelChain = pDeferList;
1442 } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ));
1444 pos.pDelChain = nullptr;
// NOTE(review): the constructor's name line is elided in this chunk (the
// enclosing class header is not visible), so only the initializer list and
// body remain. Initializes the head tower at maximum height, the current
// height at the minimum, and an empty deferred-deletion chain.
1451 /// Default constructor
1453 : m_Head( c_nMaxHeight )
1454 , m_nHeight( c_nMinHeight )
1455 , m_pDeferredDelChain( nullptr )
// Compile-time guard: the set's GC must match the hook's GC.
1457 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
// Publish the fully-constructed head node to other threads.
1459 // Barrier for head node
1460 CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
1463 /// Clears and destructs the skip-list
// Forward-iterator interface over the bottom (level-0) list.
// NOTE(review): several accessor signatures (begin(), end(), cbegin's
// sibling) are elided in this chunk; only the bodies/typedefs below remain.
// Iterator type (non-const view over level-0 nodes)
1471 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1473 /// Const iterator type
1474 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1476 /// Returns a forward iterator addressing the first element in a set
1479 return iterator( *m_Head.head() );
1482 /// Returns a forward const iterator addressing the first element in a set
1483 const_iterator begin() const
1485 return const_iterator( *m_Head.head() );
1488 /// Returns a forward const iterator addressing the first element in a set
1489 const_iterator cbegin()
1491 return const_iterator( *m_Head.head() );
1494 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
1500 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1501 const_iterator end() const
1503 return const_iterator();
1506 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1507 const_iterator cend()
1509 return const_iterator();
1513 /// Inserts new node
1515 The function inserts \p val in the set if it does not contain
1516 an item with key equal to \p val.
1518 The function applies RCU lock internally.
1520 Returns \p true if \p val is placed into the set, \p false otherwise.
1522 bool insert( value_type& val )
1524 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1525 return insert( val, []( value_type& ) {} );
1527 return insert( val, empty_insert_functor() );
// NOTE(review): this method is elided in this chunk — the RCU-lock scope,
// the retry loop around find_position/insert_at_position, the bFound /
// bTowerMade branches and the return statements are not visible.
1531 /// Inserts new node
1533 This function is intended for derived non-intrusive containers.
1535 The function allows to split creating of new item into two part:
1536 - create item with key only
1537 - insert new item into the set
1538 - if inserting is success, calls \p f functor to initialize value-field of \p val.
1540 The functor signature is:
1542 void func( value_type& val );
1544 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1545 \p val no any other changes could be made on this set's item by concurrent threads.
1546 The user-defined functor is called only if the inserting is success and may be passed by reference
1547 using <tt>boost::ref</tt>
1549 RCU \p synchronize method can be called. RCU should not be locked.
1551 template <typename Func>
1552 bool insert( value_type& val, Func f )
1554 check_deadlock_policy::check();
1560 node_type * pNode = node_traits::to_node_ptr( val );
// scp owns the node tower until insertion succeeds.
1561 scoped_node_ptr scp( pNode );
1562 unsigned int nHeight = pNode->height();
1563 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1564 bool bTowerMade = false;
1570 bool bFound = find_position( val, pos, key_comparator(), true );
1572 // scoped_node_ptr deletes the node tower if we create it
1576 m_Stat.onInsertFailed();
// Build the random-height tower lazily, only when the key is absent.
1582 build_node( pNode );
1583 nHeight = pNode->height();
1588 if ( !insert_at_position( val, pNode, pos, f )) {
1589 m_Stat.onInsertRetry();
1593 increase_height( nHeight );
1595 m_Stat.onAddNode( nHeight );
1596 m_Stat.onInsertSuccess();
// Retire any nodes unlinked while positioning (after RCU unlock).
1603 dispose_chain( pos );
// NOTE(review): this method is elided in this chunk — the RCU-lock scope,
// the retry loop, the bFound/bTowerMade branching and the returns of bRet
// are not visible here.
1608 /// Ensures that the \p val exists in the set
1610 The operation performs inserting or changing data with lock-free manner.
1612 If the item \p val is not found in the set, then \p val is inserted into the set.
1613 Otherwise, the functor \p func is called with item found.
1614 The functor signature is:
1616 void func( bool bNew, value_type& item, value_type& val );
1619 - \p bNew - \p true if the item has been inserted, \p false otherwise
1620 - \p item - item of the set
1621 - \p val - argument \p val passed into the \p ensure function
1622 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1623 refer to the same thing.
1625 The functor can change non-key fields of the \p item; however, \p func must guarantee
1626 that during changing no any other modifications could be made on this item by concurrent threads.
1628 You can pass \p func argument by value or by reference using <tt>boost::ref</tt> or cds::ref.
1630 RCU \p synchronize method can be called. RCU should not be locked.
1632 Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
1633 \p second is \p true if new item has been added or \p false if the item with \p key
1634 already is in the set.
1636 template <typename Func>
1637 std::pair<bool, bool> ensure( value_type& val, Func func )
1639 check_deadlock_policy::check();
1642 std::pair<bool, bool> bRet( true, false );
1645 node_type * pNode = node_traits::to_node_ptr( val );
// scp owns the node tower until insertion succeeds.
1646 scoped_node_ptr scp( pNode );
1647 unsigned int nHeight = pNode->height();
1648 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1649 bool bTowerMade = false;
// Pre-C++11: wrap func so it can be adapted to insert_at_position.
1651 # ifndef CDS_CXX11_LAMBDA_SUPPORT
1652 insert_at_ensure_functor<Func> wrapper( func );
1658 bool bFound = find_position( val, pos, key_comparator(), true );
1660 // scoped_node_ptr deletes the node tower if we create it before
// Key already present: invoke functor with bNew == false.
1664 cds::unref(func)( false, *node_traits::to_value_ptr(pos.pCur), val );
1665 m_Stat.onEnsureExist();
1670 build_node( pNode );
1671 nHeight = pNode->height();
1676 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1677 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { cds::unref(func)( true, item, item ); }))
1679 if ( !insert_at_position( val, pNode, pos, cds::ref(wrapper) ))
1682 m_Stat.onInsertRetry();
1686 increase_height( nHeight );
1689 m_Stat.onAddNode( nHeight );
1690 m_Stat.onEnsureNew();
// Retire any nodes unlinked while positioning (after RCU unlock).
1696 dispose_chain( pos );
// NOTE(review): this method is elided in this chunk — the RCU-lock scope,
// the retry/back-off loop and the returns are not visible here.
1701 /// Unlinks the item \p val from the set
1703 The function searches the item \p val in the set and unlink it from the set
1704 if it is found and is equal to \p val.
1706 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
1707 and deletes the item found. \p unlink finds an item by key and deletes it
1708 only if \p val is an item of that set, i.e. the pointer to item found
1709 is equal to <tt> &val </tt>.
1711 RCU \p synchronize method can be called. RCU should not be locked.
1713 The \ref disposer specified in \p Traits class template parameter is called
1714 by garbage collector \p GC asynchronously.
1716 The function returns \p true if success and \p false otherwise.
1718 bool unlink( value_type& val )
1720 check_deadlock_policy::check();
1728 if ( !find_position( val, pos, key_comparator(), false ) ) {
1729 m_Stat.onUnlinkFailed();
1733 node_type * pDel = pos.pCur;
1734 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1736 unsigned int nHeight = pDel->height();
// Identity check: only unlink when the found item IS &val.
1738 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos,
1739 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1740 [](value_type const&) {}
1742 empty_erase_functor()
1747 m_Stat.onRemoveNode( nHeight );
1748 m_Stat.onUnlinkSuccess();
1752 m_Stat.onUnlinkFailed();
// Retire any nodes unlinked while positioning (after RCU unlock).
1758 dispose_chain( pos );
1763 /// Extracts the item from the set with specified \p key
1764 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1765 The function searches an item with key equal to \p key in the set,
1766 unlinks it from the set, places it to \p result parameter, and returns \p true.
1767 If the item with key equal to \p key is not found the function returns \p false.
1769 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1771 RCU \p synchronize method can be called. RCU should NOT be locked.
1772 The function does not call the disposer for the item found.
1773 The disposer will be implicitly invoked when \p result object is destroyed or when
1774 <tt>result.release()</tt> is called, see cds::urcu::exempt_ptr for explanation.
1775 @note Before reusing \p result object you should call its \p release() method.
1778 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1782 typename skip_list::exempt_ptr ep;
1783 if ( theList.extract( ep, 5 ) ) {
1787 // Dispose returned item.
1792 template <typename Q>
1793 bool extract( exempt_ptr& result, Q const& key )
1795 return do_extract( result, key );
1798 /// Extracts the item from the set with comparing functor \p pred
1800 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
1801 but \p pred predicate is used for key comparing.
1802 \p Less has the interface like \p std::less.
1803 \p pred must imply the same element order as the comparator used for building the set.
1805 template <typename Q, typename Less>
1806 bool extract_with( exempt_ptr& result, Q const& key, Less pred )
1808 return do_extract_with( result, key, pred );
1811 /// Extracts an item with minimal key from the list
1813 The function searches an item with minimal key, unlinks it, and returns the item found in \p result parameter.
1814 If the skip-list is empty the function returns \p false.
1816 RCU \p synchronize method can be called. RCU should NOT be locked.
1817 The function does not call the disposer for the item found.
1818 The disposer will be implicitly invoked when \p result object is destroyed or when
1819 <tt>result.release()</tt> is called, see cds::urcu::exempt_ptr for explanation.
1820 @note Before reusing \p result object you should call its \p release() method.
1823 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1827 typename skip_list::exempt_ptr ep;
1828 if ( theList.extract_min(ep)) {
1832 // Dispose returned item.
1837 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1838 It means that the function gets leftmost item and tries to unlink it.
1839 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1840 So, the function returns the item with minimum key at the moment of list traversing.
1842 bool extract_min( exempt_ptr& result )
1844 return do_extract_min( result );
1847 /// Extracts an item with maximal key from the list
1849 The function searches an item with maximal key, unlinks it, and returns the item found in \p result parameter.
1850 If the skip-list is empty the function returns \p false.
1852 RCU \p synchronize method can be called. RCU should NOT be locked.
1853 The function does not call the disposer for the item found.
1854 The disposer will be implicitly invoked when \p result object is destroyed or when
1855 <tt>result.release()</tt> is called, see cds::urcu::exempt_ptr for explanation.
1856 @note Before reusing \p result object you should call its \p release() method.
1859 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1863 typename skip_list::exempt_ptr ep;
1864 if ( theList.extract_max(ep) ) {
1867 // Dispose returned item.
1872 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1873 It means that the function gets rightmost item and tries to unlink it.
1874 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1875 So, the function returns the item with maximum key at the moment of list traversing.
1877 bool extract_max( exempt_ptr& result )
1879 return do_extract_max( result );
1882 /// Deletes the item from the set
1883 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1884 The function searches an item with key equal to \p val in the set,
1885 unlinks it from the set, and returns \p true.
1886 If the item with key equal to \p val is not found the function return \p false.
1888 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1890 RCU \p synchronize method can be called. RCU should not be locked.
1892 template <typename Q>
1893 bool erase( const Q& val )
1895 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1896 return do_erase( val, key_comparator(), [](value_type const&) {} );
1898 return do_erase( val, key_comparator(), empty_erase_functor() );
1902 /// Delete the item from the set with comparing functor \p pred
1904 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1905 but \p pred predicate is used for key comparing.
1906 \p Less has the interface like \p std::less.
1907 \p pred must imply the same element order as the comparator used for building the set.
1909 template <typename Q, typename Less>
1910 bool erase_with( const Q& val, Less pred )
1912 # ifdef CDS_CXX11_LAMBDA_SUPPORT
1913 return do_erase( val, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1915 return do_erase( val, cds::opt::details::make_comparator_from_less<Less>(), empty_erase_functor() );
1919 /// Deletes the item from the set
1920 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1921 The function searches an item with key equal to \p val in the set,
1922 call \p f functor with item found, unlinks it from the set, and returns \p true.
1923 The \ref disposer specified in \p Traits class template parameter is called
1924 by garbage collector \p GC asynchronously.
1926 The \p Func interface is
1929 void operator()( value_type const& item );
1932 The functor can be passed by reference with <tt>boost:ref</tt>
1934 If the item with key equal to \p val is not found the function return \p false.
1936 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1938 RCU \p synchronize method can be called. RCU should not be locked.
1940 template <typename Q, typename Func>
1941 bool erase( Q const& val, Func f )
1943 return do_erase( val, key_comparator(), f );
1946 /// Delete the item from the set with comparing functor \p pred
1948 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1949 but \p pred predicate is used for key comparing.
1950 \p Less has the interface like \p std::less.
1951 \p pred must imply the same element order as the comparator used for building the set.
1953 template <typename Q, typename Less, typename Func>
1954 bool erase_with( Q const& val, Less pred, Func f )
1956 return do_erase( val, cds::opt::details::make_comparator_from_less<Less>(), f );
1959 /// Finds the key \p val
1960 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1961 The function searches the item with key equal to \p val and calls the functor \p f for item found.
1962 The interface of \p Func functor is:
1965 void operator()( value_type& item, Q& val );
1968 where \p item is the item found, \p val is the <tt>find</tt> function argument.
1970 You can pass \p f argument by value or by reference using <tt>boost::ref</tt> or cds::ref.
1972 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1973 that \p item cannot be disposed during functor is executing.
1974 The functor does not serialize simultaneous access to the set \p item. If such access is
1975 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1977 The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor
1978 can modify both arguments.
1980 The function applies RCU lock internally.
1982 The function returns \p true if \p val is found, \p false otherwise.
1984 template <typename Q, typename Func>
1985 bool find( Q& val, Func f )
1987 return do_find_with( val, key_comparator(), f );
1990 /// Finds the key \p val with comparing functor \p pred
1992 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1993 but \p cmp is used for key comparison.
1994 \p Less functor has the interface like \p std::less.
1995 \p cmp must imply the same element order as the comparator used for building the set.
1997 template <typename Q, typename Less, typename Func>
1998 bool find_with( Q& val, Less pred, Func f )
2000 return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(), f );
2003 /// Finds the key \p val
2004 /** @anchor cds_intrusive_SkipListSet_rcu_find_cfunc
2005 The function searches the item with key equal to \p val and calls the functor \p f for item found.
2006 The interface of \p Func functor is:
2009 void operator()( value_type& item, Q const& val );
2012 where \p item is the item found, \p val is the <tt>find</tt> function argument.
2014 You can pass \p f argument by value or by reference using <tt>boost::ref</tt> or cds::ref.
2016 The functor can change non-key fields of \p item. Note that the functor is only guarantee
2017 that \p item cannot be disposed during functor is executing.
2018 The functor does not serialize simultaneous access to the set \p item. If such access is
2019 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
2021 The function applies RCU lock internally.
2023 The function returns \p true if \p val is found, \p false otherwise.
2025 template <typename Q, typename Func>
2026 bool find( Q const& val, Func f )
2028 return do_find_with( val, key_comparator(), f );
2031 /// Finds the key \p val with comparing functor \p pred
2033 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_cfunc "find(Q const&, Func)"
2034 but \p cmp is used for key comparison.
2035 \p Less functor has the interface like \p std::less.
2036 \p cmp must imply the same element order as the comparator used for building the set.
2038 template <typename Q, typename Less, typename Func>
2039 bool find_with( Q const& val, Less pred, Func f )
2041 return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(), f );
2044 /// Finds the key \p val
2045 /** @anchor cds_intrusive_SkipListSet_rcu_find_val
2046 The function searches the item with key equal to \p val
2047 and returns \p true if it is found, and \p false otherwise.
2049 The function applies RCU lock internally.
2051 template <typename Q>
2052 bool find( Q const& val )
2054 # ifdef CDS_CXX11_LAMBDA_SUPPORT
2055 return do_find_with( val, key_comparator(), [](value_type& , Q const& ) {} );
2057 return do_find_with( val, key_comparator(), empty_find_functor() );
2061 /// Finds the key \p val with comparing functor \p pred
2063 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)"
2064 but \p pred is used for key compare.
2065 \p Less functor has the interface like \p std::less.
2066 \p pred must imply the same element order as the comparator used for building the set.
2068 template <typename Q, typename Less>
2069 bool find_with( Q const& val, Less pred )
2071 return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(),
2072 # ifdef CDS_CXX11_LAMBDA_SUPPORT
2073 [](value_type& , Q const& ) {}
2075 empty_find_functor()
// NOTE(review): elided here — the `? pFound : nullptr;` tail of the lambda
// branch, the `# else` functor-object declaration and `# endif` are not
// visible in this chunk.
2080 /// Finds the key \p val and return the item found
2081 /** \anchor cds_intrusive_SkipListSet_rcu_get
2082 The function searches the item with key equal to \p val and returns the pointer to item found.
2083 If \p val is not found it returns \p NULL.
2085 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
2087 RCU should be locked before call of this function.
2088 Returned item is valid only while RCU is locked:
2090 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
2095 skip_list::rcu_lock lock;
2097 foo * pVal = theList.get( 5 );
2103 // Unlock RCU by rcu_lock destructor
2104 // pVal can be retired by disposer at any time after RCU has been unlocked
2107 After RCU unlocking the \p %force_dispose member function can be called manually,
2108 see \ref force_dispose for explanation.
2110 template <typename Q>
2111 value_type * get( Q const& val )
2113 assert( gc::is_locked());
// Lambda branch: capture the found item's address into pFound.
2115 # ifdef CDS_CXX11_LAMBDA_SUPPORT
2116 value_type * pFound;
2117 return do_find_with( val, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } )
// Functor-object branch: gf records the found pointer in gf.pFound.
2121 return do_find_with( val, key_comparator(), cds::ref(gf) )
2122 ? gf.pFound : nullptr;
// NOTE(review): elided here — the `? pFound : nullptr;` tail of the lambda
// branch, the `# else` functor-object declaration and `# endif` are not
// visible in this chunk.
2126 /// Finds the key \p val and return the item found
2128 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
2129 but \p pred is used for comparing the keys.
2131 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
2133 \p pred must imply the same element order as the comparator used for building the set.
2135 template <typename Q, typename Less>
2136 value_type * get_with( Q const& val, Less pred )
2138 assert( gc::is_locked());
// Lambda branch: capture the found item's address into pFound.
2140 # ifdef CDS_CXX11_LAMBDA_SUPPORT
2141 value_type * pFound;
2142 return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(),
2143 [&pFound](value_type& found, Q const& ) { pFound = &found; } )
// Functor-object branch: gf records the found pointer in gf.pFound.
2147 return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(), cds::ref(gf) )
2148 ? gf.pFound : nullptr;
// NOTE(review): the function signature line is elided in this chunk; only
// the documentation and the return of the item counter remain.
2152 /// Returns item count in the set
2154 The value returned depends on item counter type provided by \p Traits template parameter.
2155 If it is atomicity::empty_item_counter this function always returns 0.
2156 Therefore, the function is not suitable for checking the set emptiness, use \ref empty
2157 member function for this purpose.
2161 return m_ItemCounter;
// NOTE(review): the function signature line is elided in this chunk. The
// set is empty exactly when the head node's level-0 successor is null.
2164 /// Checks if the set is empty
2167 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
// NOTE(review): elided here — the function signature, the exempt_ptr
// declaration and the loop's terminating semicolon are not visible.
// Drains the set by repeatedly extracting the minimal item.
2170 /// Clears the set (non-atomic)
2172 The function unlink all items from the set.
2173 The function is not atomic, thus, in multi-threaded environment with parallel insertions
2177 assert( set.empty() );
2179 the assertion could be raised.
2181 For each item the \ref disposer will be called automatically after unlinking.
2186 while ( extract_min(ep) )
2190 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
2191 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
2193 return c_nMaxHeight;
2196 /// Returns const reference to internal statistics
2197 stat const& statistics() const
2202 /// Clears internal list of ready-to-remove items passing it to RCU reclamation cycle
2203 /** @anchor cds_intrusive_SkipListSet_rcu_force_dispose
2204 Skip list has complex multi-step algorithm for removing an item. In fact, when you
2205 remove the item it is just marked as removed that is enough for the success of your operation.
2206 Actual removing can take place in the future, in another call or even in another thread.
2207 Inside RCU lock the removed item cannot be passed to RCU reclamation cycle
2208 since it can lead to deadlock. To solve this problem, the current skip list implementation
2209 has internal list of items which is ready to remove but is not yet passed to RCU reclamation.
2210 Usually, this list will be passed to RCU reclamation in the next suitable call of skip list member function.
2211 In some cases we want to pass it to RCU reclamation immediately after RCU unlocking.
2212 This function provides such opportunity: it checks whether the RCU is not locked and if it is true
2213 the function passes the internal ready-to-remove list to RCU reclamation cycle.
2215 The RCU \p synchronize can be called.
2217 void force_dispose()
2219 if ( !gc::is_locked() )
2224 }} // namespace cds::intrusive
2227 #endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H