2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
34 #include <type_traits>
36 #include <cds/intrusive/details/skip_list_base.h>
37 #include <cds/opt/compare.h>
38 #include <cds/urcu/details/check_deadlock.h>
39 #include <cds/details/binary_functor_wrapper.h>
40 #include <cds/urcu/exempt_ptr.h>
41 #include <cds/urcu/raw_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
44 namespace cds { namespace intrusive {
// Specialization of skip_list::node for RCU-based garbage collectors.
// A node owns a "tower" of next-pointers: m_pNext serves level 0, and an
// optional external array m_arrNext serves levels 1 .. m_nHeight-1.
// NOTE(review): this view of the file is a lossy extraction (interior lines
// are missing and original line numbers are fused into the text); code is
// kept untouched, only comments are added.
49 template <class RCU, typename Tag>
50 class node< cds::urcu::gc< RCU >, Tag >
53 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
54 typedef Tag tag ; ///< tag
// Two low-order bits are stolen from each next pointer as state flags:
57 // bit 0 - the item is logically deleted
58 // bit 1 - the item is extracted (only for level 0)
59 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
60 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
61 typedef atomic_marked_ptr tower_item_type;
64 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
66 node * m_pDelChain; ///< Deleted node chain (local for a thread)
// Lifecycle flags (volatile, not atomic — presumably guarded by the
// list's marking protocol; TODO confirm against the full source).
68 bool volatile m_bLinked;
69 bool volatile m_bUnlinked;
72 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
73 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
76 /// Constructs a node of height 1 (a bottom-list node)
79 , m_pDelChain( nullptr )
82 , m_bUnlinked( false )
85 , m_arrNext( nullptr )
// Destructor sanity check: a node must not die while linked but not yet unlinked.
91 assert( !m_bLinked || m_bUnlinked );
95 /// Constructs a node of height \p nHeight
96 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
// Height-1 nodes carry no external tower; taller nodes require one.
98 assert( nHeight > 0 );
99 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
100 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
103 m_arrNext = nextTower;
// Detaches the external tower and returns it so the caller can free it.
107 atomic_marked_ptr * release_tower()
109 atomic_marked_ptr * pTower = m_arrNext;
115 atomic_marked_ptr * get_tower() const
// Resets the next-pointers of levels 1 .. height-1 to null marked_ptr.
122 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
123 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
126 /// Access to element of next pointer array
127 atomic_marked_ptr& next( unsigned int nLevel )
129 assert( nLevel < height() );
130 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
132 # ifdef CDS_THREAD_SANITIZER_ENABLED
133 // TSan false positive: m_arrNext is read-only array
134 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
135 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
136 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
139 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
143 /// Access to element of next pointer array (const version)
144 atomic_marked_ptr const& next( unsigned int nLevel ) const
146 assert( nLevel < height() );
// NOTE(review): unlike the non-const overload above, this assert omits the
// parentheses around (nLevel > 0 && m_arrNext != nullptr). Meaning is the
// same (&& binds tighter than ||) but the style is inconsistent.
147 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
149 # ifdef CDS_THREAD_SANITIZER_ENABLED
150 // TSan false positive: m_arrNext is read-only array
151 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
152 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
153 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
156 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
160 /// Access to element of next pointer array (same as \ref next function)
161 atomic_marked_ptr& operator[]( unsigned int nLevel )
163 return next( nLevel );
166 /// Access to element of next pointer array (same as \ref next function)
167 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
169 return next( nLevel );
172 /// Height of the node
173 unsigned int height() const
178 /// Clears internal links
181 assert( m_arrNext == nullptr );
182 m_pNext.store( marked_ptr(), atomics::memory_order_release );
183 m_pDelChain = nullptr;
// True when the node carries no links and no external tower.
186 bool is_cleared() const
188 return m_pNext == atomic_marked_ptr()
189 && m_arrNext == nullptr
193 } // namespace skip_list
197 namespace skip_list { namespace details {
// Forward iterator over the bottom-level (level 0) list of an RCU skip-list.
// Valid ONLY while the caller holds the RCU read-side lock — every entry
// point asserts gc::is_locked().
// NOTE(review): interior lines of this chunk are missing (lossy extraction);
// code is left byte-identical, only comments are added.
199 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
200 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
203 typedef cds::urcu::gc< RCU > gc;
204 typedef NodeTraits node_traits;
205 typedef BackOff back_off;
206 typedef typename node_traits::node_type node_type;
207 typedef typename node_traits::value_type value_type;
208 static bool const c_isConst = IsConst;
// Reference type: const for const_iterator, mutable otherwise.
210 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
213 typedef typename node_type::marked_ptr marked_ptr;
214 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
221 // RCU should be locked before iterating!!!
222 assert( gc::is_locked() );
// A mark on the TOP-level next pointer flags the node as logically deleted.
227 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
228 // Current node is marked as deleted. So, its next pointer can point to anything
229 // In this case we interrupt our iteration and returns end() iterator.
234 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
235 node_type * pp = p.ptr();
237 // p is marked as deleted. Spin waiting for physical removal
241 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
242 // p is marked as deleted. Spin waiting for physical removal
252 public: // for internal use only!!!
// Constructs an iterator positioned at the first live node after refHead.
253 iterator( node_type& refHead )
256 // RCU should be locked before iterating!!!
257 assert( gc::is_locked() );
262 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
268 node_type * pp = p.ptr();
269 // Logically deleted node is marked from highest level
270 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
283 // RCU should be locked before iterating!!!
284 assert( gc::is_locked() );
287 iterator( iterator const& s)
288 : m_pNode( s.m_pNode )
290 // RCU should be locked before iterating!!!
291 assert( gc::is_locked() );
// Dereference: map the node pointer back to the user's value.
294 value_type * operator ->() const
296 assert( m_pNode != nullptr );
297 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
299 return node_traits::to_value_ptr( m_pNode );
302 value_ref operator *() const
304 assert( m_pNode != nullptr );
305 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
307 return *node_traits::to_value_ptr( m_pNode );
// Pre-increment only; there is no post-increment operator.
311 iterator& operator ++()
317 iterator& operator = (const iterator& src)
319 m_pNode = src.m_pNode;
// Iterators compare equal iff they reference the same node.
323 template <typename Bkoff, bool C>
324 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
326 return m_pNode == i.m_pNode;
328 template <typename Bkoff, bool C>
329 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
331 return !( *this == i );
334 }} // namespace skip_list::details
337 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
338 /** @ingroup cds_intrusive_map
339 @anchor cds_intrusive_SkipListSet_rcu
341 The implementation of well-known probabilistic data structure called skip-list
342 invented by W.Pugh in his papers:
343 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
344 - [1990] W.Pugh A Skip List Cookbook
346 A skip-list is a probabilistic data structure that provides expected logarithmic
347 time search without the need of rebalance. The skip-list is a collection of sorted
348 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
349 Each list has a level, ranging from 0 to 32. The bottom-level list contains
350 all the nodes, and each higher-level list is a sublist of the lower-level lists.
351 Each node is created with a random top level (with a random height), and belongs
352 to all lists up to that level. The probability that a node has the height 1 is 1/2.
353 The probability that a node has the height N is 1/2 ** N (more precisely,
354 the distribution depends on the random generator provided, but our generators
357 The lock-free variant of skip-list is implemented according to book
358 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
359 chapter 14.4 "A Lock-Free Concurrent Skiplist".
361 <b>Template arguments</b>:
362 - \p RCU - one of \ref cds_urcu_gc "RCU type"
363 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
364 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
365 - \p Traits - set traits, default is \p skip_list::traits
366 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
367 instead of \p Traits template argument.
369 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
370 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
374 The class supports a forward iterator (\ref iterator and \ref const_iterator).
375 The iteration is ordered.
377 You may iterate over skip-list set items only under RCU lock.
378 Only in this case the iterator is thread-safe since
379 while RCU is locked any set's item cannot be reclaimed.
381 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
382 (i.e. inserting, erasing and so on) is not possible.
384 @warning The iterator object cannot be passed between threads.
386 Example how to use skip-list set iterators:
388 // First, you should include the header for RCU type you have chosen
389 #include <cds/urcu/general_buffered.h>
390 #include <cds/intrusive/skip_list_rcu.h>
392 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
398 // Traits for your skip-list.
399 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
400 struct my_traits: public cds::intrusive::skip_list::traits
404 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
406 my_skiplist_set theSet;
412 // Apply RCU locking manually
413 typename rcu_type::scoped_lock sl;
415 for ( auto it = theSet.begin(); it != theSet.end(); ++it ) {
419 // rcu_type::scoped_lock destructor releases RCU lock implicitly
423 The iterator class supports the following minimalistic interface:
430 iterator( iterator const& s);
432 value_type * operator ->() const;
433 value_type& operator *() const;
436 iterator& operator ++();
439 iterator& operator = (const iterator& src);
441 bool operator ==(iterator const& i ) const;
442 bool operator !=(iterator const& i ) const;
445 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
449 You should incorporate skip_list::node into your struct \p T and provide
450 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
451 define a struct based on \p skip_list::traits.
453 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
455 // First, you should include the header for RCU type you have chosen
456 #include <cds/urcu/general_buffered.h>
458 // Include RCU skip-list specialization
459 #include <cds/intrusive/skip_list_rcu.h>
462 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
464 // Data stored in skip list
465 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
474 // my_data compare functor
476 int operator()( const my_data& d1, const my_data& d2 )
478 return d1.strKey.compare( d2.strKey );
481 int operator()( const my_data& d, const std::string& s )
483 return d.strKey.compare(s);
486 int operator()( const std::string& s, const my_data& d )
488 return s.compare( d.strKey );
493 struct my_traits: public cds::intrusive::skip_list::traits
495 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
496 typedef my_data_cmp compare;
499 // Declare skip-list set type
500 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
503 Equivalent option-based code:
505 #include <cds/urcu/general_buffered.h>
506 #include <cds/intrusive/skip_list_rcu.h>
508 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
517 // Declare option-based skip-list set
518 typedef cds::intrusive::SkipListSet< rcu_type
520 , typename cds::intrusive::skip_list::make_traits<
521 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
522 ,cds::intrusive::opt::compare< my_data_cmp >
531 #ifdef CDS_DOXYGEN_INVOKED
532 ,typename Traits = skip_list::traits
// Intrusive skip-list set, specialization for RCU garbage collectors.
// NOTE(review): interior lines are missing (lossy extraction); code is
// byte-identical, only comments are added.
537 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
540 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
541 typedef T value_type; ///< type of value stored in the skip-list
542 typedef Traits traits; ///< Traits template parameter
544 typedef typename traits::hook hook; ///< hook type
545 typedef typename hook::node_type node_type; ///< node type
547 # ifdef CDS_DOXYGEN_INVOKED
548 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
550 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
553 typedef typename traits::disposer disposer; ///< disposer
554 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
556 typedef typename traits::item_counter item_counter; ///< Item counting policy used
557 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
558 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
559 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
560 typedef typename traits::back_off back_off; ///< Back-off strategy
561 typedef typename traits::stat stat; ///< internal statistics type
562 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
563 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
564 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
567 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
569 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
570 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Compile-time clamp of the generator's upper bound to the library limit.
572 static unsigned int const c_nMaxHeight = std::conditional<
573 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
574 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
575 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
579 static unsigned int const c_nMinHeight = 5;
583 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
584 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
588 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
// Use the user-supplied node builder from traits if given, otherwise the
// intrusive default above.
590 typedef typename std::conditional<
591 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
592 ,intrusive_node_builder
593 ,typename traits::internal_node_builder
594 >::type node_builder;
596 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Disposes a value by tearing its node down through the node builder.
598 static void dispose_node( value_type * pVal )
602 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
608 void operator()( value_type * pVal )
610 dispose_node( pVal );
// Retires a whole chain of deleted nodes (linked via m_pDelChain) in one
// batch. Must be called OUTSIDE the RCU read-side critical section.
614 static void dispose_chain( node_type * pChain )
617 assert( !gc::is_locked() );
// The lambda acts as a generator for batch_retire: each call yields the
// next retired pointer, terminating with a null retired_ptr sentinel.
619 auto f = [&pChain]() -> cds::urcu::retired_ptr {
620 node_type * p = pChain;
622 pChain = p->m_pDelChain;
623 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
625 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
627 gc::batch_retire(std::ref(f));
// Search position: predecessor/successor at every level, plus a chain of
// nodes unlinked during the search that is disposed on destruction.
632 node_type * pPrev[ c_nMaxHeight ];
633 node_type * pSucc[ c_nMaxHeight ];
634 node_type * pNext[ c_nMaxHeight ];
637 node_type * pDelChain;
640 : pDelChain( nullptr )
645 dispose_chain( pDelChain );
// Pushes an unlinked node onto the local deferred-disposal chain.
648 void dispose( node_type * p )
650 assert( p != nullptr );
651 assert( p->m_pDelChain == nullptr );
653 p->m_pDelChain = pDelChain;
658 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
662 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
664 item_counter m_ItemCounter; ///< item counter
665 random_level_generator m_RandomLevelGen; ///< random level generator instance
666 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
667 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
668 mutable stat m_Stat; ///< internal statistics
// Returns a random node height in [1 .. generator upper bound].
672 unsigned int random_level()
674 // Random generator produces a number from range [0..31]
675 // We need a number from range [1..32]
676 return m_RandomLevelGen() + 1;
679 template <typename Q>
680 node_type * build_node( Q v )
682 return node_builder::make_tower( v, m_RandomLevelGen );
687 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
// Disposer adapter used by raw_ptr to free a deferred deletion chain.
691 struct chain_disposer {
692 void operator()( node_type * pChain ) const
694 dispose_chain( pChain );
697 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
701 /// Result of \p get(), \p get_with() functions - pointer to the node found
702 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
// Bit 1 of the level-0 mark distinguishes "extracted" from plain "erased".
707 bool is_extracted( marked_node_ptr const p ) const
709 return (p.bits() & 2) != 0;
// Descends from the top level looking for val and fills pos with the
// predecessor/successor pair at every level. Helps physically unlink any
// logically-deleted node it encounters. Returns true iff an item whose key
// compares equal to val was found (pos.pCur then points at it).
// NOTE(review): interior lines are missing (lossy extraction); code is left
// byte-identical, only comments are added.
712 template <typename Q, typename Compare >
713 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
715 assert( gc::is_locked() );
718 marked_node_ptr pSucc;
719 marked_node_ptr pCur;
723 pPred = m_Head.head();
725 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
728 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
730 // pCur.bits() means that pPred is logically deleted
734 if ( pCur.ptr() == nullptr ) {
735 // end of the list at level nLevel - goto next level
739 // pSucc contains deletion mark for pCur
740 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Validate that pPred's link has not changed under us; otherwise retry.
742 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
745 if ( pSucc.bits() ) {
746 // pCur is marked, i.e. logically deleted.
747 marked_node_ptr p( pCur.ptr() );
750 pCur->m_bUnlinked = true;
// Help removal: swing pPred's link past the marked node.
752 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
753 memory_model::memory_order_release, atomics::memory_order_relaxed ))
756 if ( !is_extracted( pSucc )) {
757 // We cannot free the node at this moment since RCU is locked
758 // Link deleted nodes to a chain to free later
759 pos.dispose( pCur.ptr() );
760 m_Stat.onEraseWhileFind();
763 m_Stat.onExtractWhileFind();
770 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
773 else if ( nCmp == 0 && bStopIfFound )
// Record the insert/remove position at this level before descending.
781 pos.pPrev[ nLevel ] = pPred;
782 pos.pSucc[ nLevel ] = pCur.ptr();
789 pos.pCur = pCur.ptr();
790 return pCur.ptr() && nCmp == 0;
// Like find_position() but targets the leftmost (minimum) item.
793 bool find_min_position( position& pos )
795 assert( gc::is_locked() );
798 marked_node_ptr pSucc;
799 marked_node_ptr pCur;
802 pPred = m_Head.head();
804 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
806 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
807 // pCur.bits() means that pPred is logically deleted
808 // head cannot be deleted
809 assert( pCur.bits() == 0 );
813 // pSucc contains deletion mark for pCur
814 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
816 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
819 if ( pSucc.bits() ) {
820 // pCur is marked, i.e. logically deleted.
823 pCur->m_bUnlinked = true;
825 marked_node_ptr p( pCur.ptr() );
826 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
827 memory_model::memory_order_release, atomics::memory_order_relaxed ))
830 if ( !is_extracted( pSucc )) {
831 // We cannot free the node at this moment since RCU is locked
832 // Link deleted nodes to a chain to free later
833 pos.dispose( pCur.ptr() );
834 m_Stat.onEraseWhileFind();
837 m_Stat.onExtractWhileFind();
846 pos.pPrev[ nLevel ] = pPred;
847 pos.pSucc[ nLevel ] = pCur.ptr();
849 return (pos.pCur = pCur.ptr()) != nullptr;
// Like find_position() but targets the rightmost (maximum) item.
852 bool find_max_position( position& pos )
854 assert( gc::is_locked() );
857 marked_node_ptr pSucc;
858 marked_node_ptr pCur;
861 pPred = m_Head.head();
863 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
866 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
868 // pCur.bits() means that pPred is logically deleted
872 if ( pCur.ptr() == nullptr ) {
873 // end of the list at level nLevel - goto next level
877 // pSucc contains deletion mark for pCur
878 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
880 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
883 if ( pSucc.bits() ) {
884 // pCur is marked, i.e. logically deleted.
887 pCur->m_bUnlinked = true;
889 marked_node_ptr p( pCur.ptr() );
890 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
891 memory_model::memory_order_release, atomics::memory_order_relaxed ))
894 if ( !is_extracted( pSucc )) {
895 // We cannot free the node at this moment since RCU is locked
896 // Link deleted nodes to a chain to free later
897 pos.dispose( pCur.ptr() );
898 m_Stat.onEraseWhileFind();
901 m_Stat.onExtractWhileFind();
916 pos.pPrev[ nLevel ] = pPred;
917 pos.pSucc[ nLevel ] = pCur.ptr();
920 return (pos.pCur = pCur.ptr()) != nullptr;
// Splices pNode into the list at the position captured in pos: the level-0
// link is installed first (making the node reachable), then the upper
// levels. Returns false when the bottom-level CAS fails, i.e. pos is stale.
// NOTE(review): interior lines are missing (lossy extraction); code is left
// byte-identical, only comments are added.
923 template <typename Func>
924 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
926 assert( gc::is_locked() );
928 unsigned int nHeight = pNode->height();
929 pNode->clear_tower();
// Link at the bottom level first.
932 marked_node_ptr p( pos.pSucc[0] );
933 pNode->next( 0 ).store( p, memory_model::memory_order_release );
934 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
938 pNode->m_bLinked = true;
// Link the remaining levels, renewing the position when a CAS fails.
943 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
946 marked_node_ptr q( pos.pSucc[ nLevel ]);
947 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
948 // pNode has been marked as removed while we are inserting it
951 m_Stat.onLogicDeleteWhileInsert();
955 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
958 // Renew insert position
959 m_Stat.onRenewInsertPosition();
960 if ( !find_position( val, pos, key_comparator(), false )) {
961 // The node has been deleted while we are inserting it
962 m_Stat.onNotFoundWhileInsert();
// Removes pDel: first marks every level top-down (logical deletion), then
// unlinks the node physically. bExtract selects the level-0 mark: 3 for
// extraction (node handed to the caller) vs 1 for plain erasure (node goes
// onto pos's disposal chain). f is applied to the value on successful
// logical deletion.
970 template <typename Func>
971 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
973 assert( pDel != nullptr );
974 assert( gc::is_locked() );
976 marked_node_ptr pSucc;
978 // logical deletion (marking)
979 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
980 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
983 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
990 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
// Marking level 0 decides the removal; mask 3 also sets the extracted bit.
995 int const nMask = bExtract ? 3 : 1;
996 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
998 f( *node_traits::to_value_ptr( pDel ));
1000 // physical deletion
1003 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
1004 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
1005 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
1006 memory_model::memory_order_release, atomics::memory_order_relaxed) )
// Slow path: fall back to find_position(), which helps finish the unlink.
1009 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
1011 m_Stat.onSlowExtract();
1013 m_Stat.onSlowErase();
1015 assert( pDel->m_bUnlinked );
1022 pDel->m_bUnlinked = true;
1025 // We cannot free the node at this moment since RCU is locked
1026 // Link deleted nodes to a chain to free later
1027 pos.dispose( pDel );
1028 m_Stat.onFastErase();
1031 m_Stat.onFastExtract();
1034 m_Stat.onEraseRetry();
// Result of the fast-path search.
// NOTE(review): "finsd" is a long-standing typo for "find"; the name is
// internal-only, so it is left as-is to keep this edit behavior-neutral.
1038 enum finsd_fastpath_result {
1039 find_fastpath_found,
1040 find_fastpath_not_found,
// Read-only search that never helps with removal. Aborts to the slow path
// when it keeps running into marked (logically deleted) nodes.
1043 template <typename Q, typename Compare, typename Func>
1044 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1047 marked_node_ptr pCur;
1048 marked_node_ptr pSucc;
1049 marked_node_ptr pNull;
1053 pPred = m_Head.head();
// Start from the current ESTIMATED height, not c_nMaxHeight.
1054 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1055 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1056 if ( pCur == pNull )
1059 while ( pCur != pNull ) {
1060 if ( pCur.bits() ) {
1061 // Wait until pCur is removed
1062 unsigned int nAttempt = 0;
// Bounded spin: at most 16 reloads before giving up on this node.
1063 while ( pCur.bits() && nAttempt++ < 16 ) {
1065 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1069 if ( pCur.bits() ) {
1070 // Maybe, we are on deleted node sequence
1071 // Abort searching, try slow-path
1072 return find_fastpath_abort;
1077 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1080 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1082 else if ( nCmp == 0 ) {
1084 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1085 return find_fastpath_found;
1087 else // pCur > val - go down
1093 return find_fastpath_not_found;
// Full search via find_position(); helps removal. Used after a fast-path
// abort, or directly by modifying operations.
1096 template <typename Q, typename Compare, typename Func>
1097 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1099 if ( find_position( val, pos, cmp, true )) {
1100 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1102 f( *node_traits::to_value_ptr( pos.pCur ), val );
// Convenience overload that supplies its own position.
1109 template <typename Q, typename Compare, typename Func>
1110 bool do_find_with( Q& val, Compare cmp, Func f )
1113 return do_find_with( val, cmp, f, pos );
// Tries the fast path first; falls back to the slow path on abort.
1116 template <typename Q, typename Compare, typename Func>
1117 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1124 switch ( find_fastpath( val, cmp, f )) {
1125 case find_fastpath_found:
1126 m_Stat.onFindFastSuccess();
1128 case find_fastpath_not_found:
1129 m_Stat.onFindFastFailed();
1135 if ( find_slowpath( val, cmp, f, pos )) {
1136 m_Stat.onFindSlowSuccess();
1140 m_Stat.onFindSlowFailed();
// Erases the item equal to val (if any), applying f to it before it is
// retired. Runs the deadlock check since it takes the RCU lock internally.
// NOTE(review): interior lines are missing (lossy extraction); code is left
// byte-identical, only comments are added.
1147 template <typename Q, typename Compare, typename Func>
1148 bool do_erase( Q const& val, Compare cmp, Func f )
1150 check_deadlock_policy::check();
1158 if ( !find_position( val, pos, cmp, false ) ) {
1159 m_Stat.onEraseFailed();
1163 node_type * pDel = pos.pCur;
1164 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1166 unsigned int nHeight = pDel->height();
1167 if ( try_remove_at( pDel, pos, f, false )) {
1169 m_Stat.onRemoveNode( nHeight );
1170 m_Stat.onEraseSuccess();
1174 m_Stat.onEraseFailed();
// Extracts (unlinks without disposing) the item equal to key.
// Caller must already hold the RCU lock; returns the value or nullptr.
1183 template <typename Q, typename Compare>
1184 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1186 // RCU should be locked!!!
1187 assert( gc::is_locked() );
1191 if ( !find_position( key, pos, cmp, false ) ) {
1192 m_Stat.onExtractFailed();
1197 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1199 unsigned int const nHeight = pDel->height();
// bExtract == true: the node is handed to the caller, not retired.
1201 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1203 m_Stat.onRemoveNode( nHeight );
1204 m_Stat.onExtractSuccess();
1207 m_Stat.onExtractFailed();
1212 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extract with the default key comparator; manages RCU locking itself.
1215 template <typename Q>
1216 value_type * do_extract( Q const& key )
1218 check_deadlock_policy::check();
1219 value_type * pDel = nullptr;
1223 pDel = do_extract_key( key, key_comparator(), pos );
// Same as do_extract() but ordering is derived from the Less predicate.
1229 template <typename Q, typename Less>
1230 value_type * do_extract_with( Q const& key, Less pred )
1233 check_deadlock_policy::check();
1234 value_type * pDel = nullptr;
1238 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// Extracts the minimum (leftmost) item; must be entered outside RCU lock.
1244 value_type * do_extract_min()
1246 assert( !gc::is_locked() );
1254 if ( !find_min_position( pos ) ) {
1255 m_Stat.onExtractMinFailed();
1260 unsigned int const nHeight = pDel->height();
1262 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1264 m_Stat.onRemoveNode( nHeight );
1265 m_Stat.onExtractMinSuccess();
1268 m_Stat.onExtractMinFailed();
1274 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extracts the maximum (rightmost) item; must be entered outside RCU lock.
1277 value_type * do_extract_max()
1279 assert( !gc::is_locked() );
1287 if ( !find_max_position( pos ) ) {
1288 m_Stat.onExtractMaxFailed();
1293 unsigned int const nHeight = pDel->height();
1295 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1297 m_Stat.onRemoveNode( nHeight );
1298 m_Stat.onExtractMaxSuccess();
1301 m_Stat.onExtractMaxFailed();
1307 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Best-effort bump of the estimated list height: a single CAS attempt,
// since losing the race to another thread is acceptable for an estimate.
1310 void increase_height( unsigned int nHeight )
1312 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1313 if ( nCur < nHeight )
1314 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
1319 /// Default constructor
1321 : m_Head( c_nMaxHeight )
1322 , m_nHeight( c_nMinHeight )
1323 , m_pDeferredDelChain( nullptr )
1325 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1327 // Barrier for head node
1328 atomics::atomic_thread_fence( memory_model::memory_order_release );
1331 /// Clears and destructs the skip-list
1338 ///@name Forward iterators (thread-safe under RCU lock)
1340 /// Forward iterator
1342 The forward iterator has some features:
1343 - it has no post-increment operator
1344 - it is based on \p skip_list::details::iterator internal implementation
1346 You may safely use iterators in multi-threaded environment only under RCU lock.
1347 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
// Mutable iterator: last template argument 'false' selects the non-const flavor.
1349 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1351 /// Const iterator type
1352 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1354 /// Returns a forward iterator addressing the first element in a set
// Iteration starts at the head sentinel's successor on the bottom level.
1357 return iterator( *m_Head.head() );
1360 /// Returns a forward const iterator addressing the first element in a set
1361 const_iterator begin() const
1363 return const_iterator( *m_Head.head() );
1366 /// Returns a forward const iterator addressing the first element in a set
1367 const_iterator cbegin() const
1369 return const_iterator( *m_Head.head() );
1372 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
// A default-constructed iterator acts as the past-the-end sentinel.
1378 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1379 const_iterator end() const
1381 return const_iterator();
1384 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1385 const_iterator cend() const
1387 return const_iterator();
1392 /// Inserts new node
1394 The function inserts \p val in the set if it does not contain
1395 an item with key equal to \p val.
1397 The function applies RCU lock internally.
1399 Returns \p true if \p val is placed into the set, \p false otherwise.
1401 bool insert( value_type& val )
// Delegates to the functor overload with a no-op initializer.
1403 return insert( val, []( value_type& ) {} );
1406 /// Inserts new node
1408 This function is intended for derived non-intrusive containers.
1410 The function allows to split creating of new item into two part:
1411 - create item with key only
1412 - insert new item into the set
1413 - if inserting is success, calls \p f functor to initialize value-field of \p val.
1415 The functor signature is:
1417 void func( value_type& val );
1419 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1420 \p val no other changes can be made on this set's item by concurrent threads.
1421 The user-defined functor is called only if the inserting is success.
1423 RCU \p synchronize method can be called. RCU should not be locked.
1425 template <typename Func>
1426 bool insert( value_type& val, Func f )
// Deadlock check: RCU must not already be locked when synchronize may be called.
1428 check_deadlock_policy::check();
1434 node_type * pNode = node_traits::to_node_ptr( val );
// scp owns the node tower until insertion succeeds (releases on success;
// deletes a tower built here on failure).
1435 scoped_node_ptr scp( pNode );
1436 unsigned int nHeight = pNode->height();
// The tower is "OK" when the node already carries a pre-built multi-level tower.
1437 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1438 bool bTowerMade = false;
// Search predecessors/successors for val on every level (pos filled here).
1444 bool bFound = find_position( val, pos, key_comparator(), true );
1446 // scoped_node_ptr deletes the node tower if we create it
1450 m_Stat.onInsertFailed();
// Key not found: build the tower lazily (random height) before linking.
1456 build_node( pNode );
1457 nHeight = pNode->height();
// Linking may fail under contention; retry path re-runs the position search.
1462 if ( !insert_at_position( val, pNode, pos, f )) {
1463 m_Stat.onInsertRetry();
// Success: grow the cached list height if this node's tower is taller.
1467 increase_height( nHeight );
1469 m_Stat.onAddNode( nHeight );
1470 m_Stat.onInsertSuccess();
1480 /// Updates the node
1482 The operation performs inserting or changing data with lock-free manner.
1484 If the item \p val is not found in the set, then \p val is inserted into the set
1485 iff \p bInsert is \p true.
1486 Otherwise, the functor \p func is called with item found.
1487 The functor signature is:
1489 void func( bool bNew, value_type& item, value_type& val );
1492 - \p bNew - \p true if the item has been inserted, \p false otherwise
1493 - \p item - item of the set
1494 - \p val - argument \p val passed into the \p %update() function
1495 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1496 refer to the same thing.
1498 The functor can change non-key fields of the \p item; however, \p func must guarantee
1499 that during changing no any other modifications could be made on this item by concurrent threads.
1501 RCU \p synchronize method can be called. RCU should not be locked.
1503 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
1504 i.e. the node has been inserted or updated,
1505 \p second is \p true if new item has been added or \p false if the item with \p key
1508 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1510 template <typename Func>
1511 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
1513 check_deadlock_policy::check();
// Default result: success, no new node (second flips to true on insert path).
1516 std::pair<bool, bool> bRet( true, false );
1519 node_type * pNode = node_traits::to_node_ptr( val );
// Owns the node tower until the node is linked in (mirrors insert()).
1520 scoped_node_ptr scp( pNode );
1521 unsigned int nHeight = pNode->height();
1522 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1523 bool bTowerMade = false;
1528 bool bFound = find_position( val, pos, key_comparator(), true );
1530 // scoped_node_ptr deletes the node tower if we create it before
// Existing item: run the user functor with bNew = false on the found value.
1534 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1535 m_Stat.onUpdateExist();
// Not found and bInsert: lazily build the tower and link the new node.
1546 build_node( pNode );
1547 nHeight = pNode->height();
// On insert, item and val are the same object, hence func(true, item, item).
1552 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1553 m_Stat.onInsertRetry();
1557 increase_height( nHeight );
1560 m_Stat.onAddNode( nHeight );
1561 m_Stat.onUpdateNew();
// Deprecated alias kept for source compatibility; forwards to update().
1570 template <typename Func>
1571 CDS_DEPRECATED("ensure() is deprecated, use update()")
1572 std::pair<bool, bool> ensure( value_type& val, Func func )
1574 return update( val, func, true );
1578 /// Unlinks the item \p val from the set
1580 The function searches the item \p val in the set and unlink it from the set
1581 if it is found and is equal to \p val.
1583 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1584 and deletes the item found. \p %unlink() searches an item by key and deletes it
1585 only if \p val is an item of that set, i.e. the pointer to item found
1586 is equal to <tt> &val </tt>.
1588 RCU \p synchronize method can be called. RCU should not be locked.
1590 The \ref disposer specified in \p Traits class template parameter is called
1591 by garbage collector \p GC asynchronously.
1593 The function returns \p true if success and \p false otherwise.
1595 bool unlink( value_type& val )
1597 check_deadlock_policy::check();
// Search by key; if no node with an equal key exists the unlink fails.
1605 if ( !find_position( val, pos, key_comparator(), false ) ) {
1606 m_Stat.onUnlinkFailed();
1610 node_type * pDel = pos.pCur;
// find_position returned a node whose key compares equal to val.
1611 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1613 unsigned int nHeight = pDel->height();
// Identity check: only remove if the found node IS &val (not merely equal key).
// Final 'false' — unlike extract, this path presumably schedules the disposer;
// TODO confirm against try_remove_at.
1615 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1617 m_Stat.onRemoveNode( nHeight );
1618 m_Stat.onUnlinkSuccess();
1622 m_Stat.onUnlinkFailed();
1631 /// Extracts the item from the set with specified \p key
1632 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1633 The function searches an item with key equal to \p key in the set,
1634 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1635 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1637 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1639 RCU \p synchronize method can be called. RCU should NOT be locked.
1640 The function does not call the disposer for the item found.
1641 The disposer will be implicitly invoked when the returned object is destroyed or when
1642 its \p release() member function is called.
1645 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1649 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1654 // Dispose returned item.
1659 template <typename Q>
1660 exempt_ptr extract( Q const& key )
// Wrap the raw pointer from do_extract; exempt_ptr owns disposal from here on.
1662 return exempt_ptr( do_extract( key ));
1665 /// Extracts the item from the set with comparing functor \p pred
1667 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1668 \p Less has the interface like \p std::less.
1669 \p pred must imply the same element order as the comparator used for building the set.
1671 template <typename Q, typename Less>
1672 exempt_ptr extract_with( Q const& key, Less pred )
// pred participates only via its type: a comparator is synthesized from Less.
1674 return exempt_ptr( do_extract_with( key, pred ));
1677 /// Extracts an item with minimal key from the list
1679 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1680 If the skip-list is empty the function returns an empty \p exempt_ptr.
1682 RCU \p synchronize method can be called. RCU should NOT be locked.
1683 The function does not call the disposer for the item found.
1684 The disposer will be implicitly invoked when the returned object is destroyed or when
1685 its \p release() member function is manually called.
1688 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1692 typename skip_list::exempt_ptr ep(theList.extract_min());
1697 // Dispose returned item.
1702 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1703 It means that the function gets leftmost item and tries to unlink it.
1704 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1705 So, the function returns the item with minimum key at the moment of list traversing.
1707 exempt_ptr extract_min()
1709 return exempt_ptr( do_extract_min());
1712 /// Extracts an item with maximal key from the list
1714 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1715 If the skip-list is empty the function returns an empty \p exempt_ptr.
1717 RCU \p synchronize method can be called. RCU should NOT be locked.
1718 The function does not call the disposer for the item found.
1719 The disposer will be implicitly invoked when the returned object is destroyed or when
1720 its \p release() member function is manually called.
1723 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1727 typename skip_list::exempt_ptr ep( theList.extract_max() );
1731 // Dispose returned item.
1736 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1737 It means that the function gets rightmost item and tries to unlink it.
1738 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1739 So, the function returns the item with maximum key at the moment of list traversing.
1741 exempt_ptr extract_max()
1743 return exempt_ptr( do_extract_max());
1746 /// Deletes the item from the set
1747 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1748 The function searches an item with key equal to \p key in the set,
1749 unlinks it from the set, and returns \p true.
1750 If the item with key equal to \p key is not found the function returns \p false.
1752 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1754 RCU \p synchronize method can be called. RCU should not be locked.
1756 template <typename Q>
1757 bool erase( const Q& key )
// Erase with a no-op "found" functor; disposal is handled by the GC/disposer.
1759 return do_erase( key, key_comparator(), [](value_type const&) {} );
1762 /// Delete the item from the set with comparing functor \p pred
1764 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1765 but \p pred predicate is used for key comparing.
1766 \p Less has the interface like \p std::less.
1767 \p pred must imply the same element order as the comparator used for building the set.
1769 template <typename Q, typename Less>
1770 bool erase_with( const Q& key, Less pred )
// A three-way comparator is synthesized from the Less type; pred itself is unused.
1773 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1776 /// Deletes the item from the set
1777 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1778 The function searches an item with key equal to \p key in the set,
1779 call \p f functor with item found, unlinks it from the set, and returns \p true.
1780 The \ref disposer specified in \p Traits class template parameter is called
1781 by garbage collector \p GC asynchronously.
1783 The \p Func interface is
1786 void operator()( value_type const& item );
1789 If the item with key equal to \p key is not found the function returns \p false.
1791 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1793 RCU \p synchronize method can be called. RCU should not be locked.
1795 template <typename Q, typename Func>
1796 bool erase( Q const& key, Func f )
1798 return do_erase( key, key_comparator(), f );
1801 /// Delete the item from the set with comparing functor \p pred
1803 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1804 but \p pred predicate is used for key comparing.
1805 \p Less has the interface like \p std::less.
1806 \p pred must imply the same element order as the comparator used for building the set.
1808 template <typename Q, typename Less, typename Func>
1809 bool erase_with( Q const& key, Less pred, Func f )
// pred participates only via its type Less; see erase_with(Q, Less).
1812 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1816 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1817 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1818 The interface of \p Func functor is:
1821 void operator()( value_type& item, Q& key );
1824 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1826 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1827 that \p item cannot be disposed during functor is executing.
1828 The functor does not serialize simultaneous access to the set \p item. If such access is
1829 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1831 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1832 can modify both arguments.
1834 The function applies RCU lock internally.
1836 The function returns \p true if \p key is found, \p false otherwise.
1838 template <typename Q, typename Func>
1839 bool find( Q& key, Func f )
1841 return do_find_with( key, key_comparator(), f );
// Const-key overload: identical behavior, functor receives Q const&.
1844 template <typename Q, typename Func>
1845 bool find( Q const& key, Func f )
1847 return do_find_with( key, key_comparator(), f );
1851 /// Finds the key \p key with comparing functor \p pred
1853 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1854 but \p pred is used for key comparison.
1855 \p Less functor has the interface like \p std::less.
1856 \p pred must imply the same element order as the comparator used for building the set.
1858 template <typename Q, typename Less, typename Func>
1859 bool find_with( Q& key, Less pred, Func f )
// Comparator synthesized from Less; the pred object itself is not invoked here.
1862 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
// Const-key overload of the same operation.
1865 template <typename Q, typename Less, typename Func>
1866 bool find_with( Q const& key, Less pred, Func f )
1869 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1873 /// Checks whether the set contains \p key
1875 The function searches the item with key equal to \p key
1876 and returns \p true if it is found, and \p false otherwise.
1878 The function applies RCU lock internally.
1880 template <typename Q>
1881 bool contains( Q const& key )
// Presence check = find with a no-op functor.
1883 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
// Deprecated alias for contains(); kept for source compatibility.
1886 template <typename Q>
1887 CDS_DEPRECATED("deprecated, use contains()")
1888 bool find( Q const& key )
1890 return contains( key );
1894 /// Checks whether the set contains \p key using \p pred predicate for searching
1896 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1897 \p Less functor has the interface like \p std::less.
1898 \p Less must imply the same element order as the comparator used for building the set.
1900 template <typename Q, typename Less>
1901 bool contains( Q const& key, Less pred )
// Presence check with comparator synthesized from Less.
1904 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
// Deprecated alias; forwards to contains(key, pred).
1907 template <typename Q, typename Less>
1908 CDS_DEPRECATED("deprecated, use contains()")
1909 bool find_with( Q const& key, Less pred )
1911 return contains( key, pred );
1915 /// Finds \p key and return the item found
1916 /** \anchor cds_intrusive_SkipListSet_rcu_get
1917 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1918 If \p key is not found it returns empty \p raw_ptr.
1920 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1922 RCU should be locked before call of this function.
1923 Returned item is valid only while RCU is locked:
1925 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1928 typename skip_list::raw_ptr pVal;
1931 skip_list::rcu_lock lock;
1933 pVal = theList.get( 5 );
1939 // You can manually release pVal after RCU-locked section
1943 template <typename Q>
1944 raw_ptr get( Q const& key )
1946 assert( gc::is_locked());
// Initialize to nullptr for consistency with get_with() and to guarantee
// pFound is never read indeterminate (the lambda sets it only when invoked).
1949 value_type * pFound = nullptr;
1950 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1951 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: return an empty raw_ptr that still carries the position disposer.
1952 return raw_ptr( raw_ptr_disposer( pos ));
1955 /// Finds \p key and return the item found
1957 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1958 but \p pred is used for comparing the keys.
1960 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1962 \p pred must imply the same element order as the comparator used for building the set.
1964 template <typename Q, typename Less>
1965 raw_ptr get_with( Q const& key, Less pred )
1968 assert( gc::is_locked());
// nullptr default: the lambda assigns pFound only when a match is visited.
1970 value_type * pFound = nullptr;
1972 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1973 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1975 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: empty raw_ptr, position disposer still attached.
1977 return raw_ptr( raw_ptr_disposer( pos ));
1980 /// Returns item count in the set
1982 The value returned depends on item counter type provided by \p Traits template parameter.
1983 For \p atomicity::empty_item_counter the function always returns 0.
1984 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1985 member function for this purpose.
// Implicit conversion of the item counter to its integral value.
1989 return m_ItemCounter;
1992 /// Checks if the set is empty
// Empty iff the head sentinel has no successor on the bottom level (level 0).
// Relaxed load: a racy snapshot is acceptable for an emptiness check.
1995 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1998 /// Clears the set (not atomic)
2000 The function unlink all items from the set.
2001 The function is not atomic, thus, in multi-threaded environment with parallel insertions
2005 assert( set.empty() );
2007 the assertion could be raised.
2009 For each item the \p disposer will be called automatically after unlinking.
// Drain the list by repeatedly extracting the minimum; each returned
// exempt_ptr disposes its item when the temporary is destroyed.
2014 while ( (ep = extract_min()) );
2017 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
2018 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
2020 return c_nMaxHeight;
2023 /// Returns const reference to internal statistics
// Read-only accessor; the stat type is empty_stat (no-op) unless Traits enables it.
2024 stat const& statistics() const
2030 }} // namespace cds::intrusive
2033 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H