2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
34 #include <type_traits>
36 #include <cds/intrusive/details/skip_list_base.h>
37 #include <cds/opt/compare.h>
38 #include <cds/urcu/details/check_deadlock.h>
39 #include <cds/details/binary_functor_wrapper.h>
40 #include <cds/urcu/exempt_ptr.h>
41 #include <cds/urcu/raw_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
44 namespace cds { namespace intrusive {
// ---------------------------------------------------------------------------
// NOTE(review): this region is an incomplete extraction of the original
// header — each line carries a fused original-line number and many lines
// (braces, initializers, preprocessor branches) are missing. The comments
// below annotate only what is visible.
// ---------------------------------------------------------------------------
// Skip-list node specialized for the RCU-based garbage collector.
// A node of height H owns H atomic "next" links: level 0 in m_pNext,
// levels 1..H-1 in the externally allocated m_arrNext tower.
49 template <class RCU, typename Tag>
50 class node< cds::urcu::gc< RCU >, Tag >
53 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
54 typedef Tag tag ; ///< tag
// The two low-order pointer bits encode deletion state:
57 // bit 0 - the item is logically deleted
58 // bit 1 - the item is extracted (only for level 0)
// The mask value 3 below reserves exactly those two bits.
59 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
60 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
61 typedef atomic_marked_ptr tower_item_type;
64 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
66 node * m_pDelChain; ///< Deleted node chain (local for a thread)
// Link-state flags: set by the set implementation (m_bLinked on level-0 link,
// m_bUnlinked on physical unlink) and checked by the assert at original 91.
68 bool volatile m_bLinked;
69 bool volatile m_bUnlinked;
72 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
73 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
76 /// Constructs a node of height 1 (a bottom-list node)
// (constructor initializer list — only partially visible here)
79 , m_pDelChain( nullptr )
82 , m_bUnlinked( false )
85 , m_arrNext( nullptr )
// Destructor sanity check (visible fragment): a node may be destroyed only
// if it was never linked, or has already been unlinked.
91 assert( !m_bLinked || m_bUnlinked );
95 /// Constructs a node of height \p nHeight
// Attaches the externally allocated tower array (links for levels >= 1).
96 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
98 assert( nHeight > 0 );
99 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
100 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
103 m_arrNext = nextTower;
// Transfers ownership of the tower array back to the caller.
107 atomic_marked_ptr * release_tower()
109 atomic_marked_ptr * pTower = m_arrNext;
115 atomic_marked_ptr * get_tower() const
// Resets all upper-level links (visible fragment — presumably clear_tower();
// confirm against the full source).
122 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
123 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
126 /// Access to element of next pointer array
127 atomic_marked_ptr& next( unsigned int nLevel )
129 assert( nLevel < height() );
130 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
132 # ifdef CDS_THREAD_SANITIZER_ENABLED
133 // TSan false positive: m_arrNext is read-only array
134 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
135 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
136 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
// Level 0 lives in m_pNext; levels >= 1 live in m_arrNext[nLevel - 1].
139 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
143 /// Access to element of next pointer array (const version)
144 atomic_marked_ptr const& next( unsigned int nLevel ) const
146 assert( nLevel < height() );
// NOTE(review): unlike original line 130 above, this assert omits the
// parentheses around (nLevel > 0 && ...). '&&' binds tighter than '||' so
// behavior is identical, but it triggers -Wparentheses; parenthesize for
// consistency with the non-const overload.
147 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
149 # ifdef CDS_THREAD_SANITIZER_ENABLED
150 // TSan false positive: m_arrNext is read-only array
151 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
// NOTE(review): binding a non-const atomic_marked_ptr& to members inside a
// const member function looks suspicious — verify against the full source
// (likely 'atomic_marked_ptr const& r' there).
152 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
153 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
156 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
160 /// Access to element of next pointer array (same as \ref next function)
161 atomic_marked_ptr& operator[]( unsigned int nLevel )
163 return next( nLevel );
166 /// Access to element of next pointer array (same as \ref next function)
167 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
169 return next( nLevel );
172 /// Height of the node
173 unsigned int height() const
178 /// Clears internal links
// Only valid for a bottom-level node (no tower allocated).
181 assert( m_arrNext == nullptr );
182 m_pNext.store( marked_ptr(), atomics::memory_order_release );
183 m_pDelChain = nullptr;
// True when the node carries no links and no deletion-chain pointer.
186 bool is_cleared() const
188 return m_pNext == atomic_marked_ptr()
189 && m_arrNext == nullptr
193 } // namespace skip_list
// ---------------------------------------------------------------------------
// NOTE(review): incomplete extraction — lines carry fused original numbers
// and some statements/braces are missing; comments annotate visible code only.
// ---------------------------------------------------------------------------
// Forward iterator over the RCU skip-list. Valid ONLY while the caller holds
// the RCU lock (every visible member asserts gc::is_locked()); under that
// lock no node it points to can be reclaimed.
197 namespace skip_list { namespace details {
199 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
200 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
203 typedef cds::urcu::gc< RCU > gc;
204 typedef NodeTraits node_traits;
205 typedef BackOff back_off;
206 typedef typename node_traits::node_type node_type;
207 typedef typename node_traits::value_type value_type;
208 static bool const c_isConst = IsConst;
// const_iterator yields 'value_type const&', iterator yields 'value_type&'.
210 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
213 typedef typename node_type::marked_ptr marked_ptr;
214 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
// Advance helper (visible fragment): a node is logically deleted iff its
// topmost-level link carries mark bits, so check that before following next.
221 // RCU should be locked before iterating!!!
222 assert( gc::is_locked() );
227 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
228 // Current node is marked as deleted. So, its next pointer can point to anything
229 // In this case we interrupt our iteration and returns end() iterator.
234 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
235 node_type * pp = p.ptr();
237 // p is marked as deleted. Spin waiting for physical removal
241 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
242 // p is marked as deleted. Spin waiting for physical removal
// Constructor from the head tower: positions the iterator on the first
// live (not logically deleted) node of the bottom list.
252 public: // for internal use only!!!
253 iterator( node_type& refHead )
256 // RCU should be locked before iterating!!!
257 assert( gc::is_locked() );
262 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
268 node_type * pp = p.ptr();
269 // Logically deleted node is marked from highest level
270 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
283 // RCU should be locked before iterating!!!
284 assert( gc::is_locked() );
// Copy constructor — also only legal under the RCU lock.
287 iterator( iterator const& s)
288 : m_pNode( s.m_pNode )
290 // RCU should be locked before iterating!!!
291 assert( gc::is_locked() );
// Member access to the value embedding the current node.
294 value_type * operator ->() const
296 assert( m_pNode != nullptr );
297 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
299 return node_traits::to_value_ptr( m_pNode );
302 value_ref operator *() const
304 assert( m_pNode != nullptr );
305 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
307 return *node_traits::to_value_ptr( m_pNode );
// Pre-increment: advances to the next live node (body not fully visible).
311 iterator& operator ++()
317 iterator& operator = (const iterator& src)
319 m_pNode = src.m_pNode;
// Comparison is by node pointer; comparable across BackOff/constness variants.
323 template <typename Bkoff, bool C>
324 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
326 return m_pNode == i.m_pNode;
328 template <typename Bkoff, bool C>
329 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
331 return !( *this == i );
334 }} // namespace skip_list::details
337 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
338 /** @ingroup cds_intrusive_map
339 @anchor cds_intrusive_SkipListSet_rcu
341 The implementation of well-known probabilistic data structure called skip-list
342 invented by W.Pugh in his papers:
343 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
344 - [1990] W.Pugh A Skip List Cookbook
346 A skip-list is a probabilistic data structure that provides expected logarithmic
347 time search without the need of rebalance. The skip-list is a collection of sorted
348 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
349 Each list has a level, ranging from 0 to 32. The bottom-level list contains
350 all the nodes, and each higher-level list is a sublist of the lower-level lists.
351 Each node is created with a random top level (with a random height), and belongs
352 to all lists up to that level. The probability that a node has the height 1 is 1/2.
353 The probability that a node has the height N is 1/2 ** N (more precisely,
354 the distribution depends on a random generator provided, but our generators
357 The lock-free variant of skip-list is implemented according to book
358 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
359 chapter 14.4 "A Lock-Free Concurrent Skiplist".
361 <b>Template arguments</b>:
362 - \p RCU - one of \ref cds_urcu_gc "RCU type"
363 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
364 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
365 - \p Traits - set traits, default is \p skip_list::traits
366 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
367 instead of \p Traits template argument.
369 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
370 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
374 The class supports a forward iterator (\ref iterator and \ref const_iterator).
375 The iteration is ordered.
377 You may iterate over skip-list set items only under RCU lock.
378 Only in this case the iterator is thread-safe since
379 while RCU is locked any set's item cannot be reclaimed.
381 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
382 (i.e. inserting, erasing and so on) is not possible.
384 @warning The iterator object cannot be passed between threads.
386 Example how to use skip-list set iterators:
388 // First, you should include the header for RCU type you have chosen
389 #include <cds/urcu/general_buffered.h>
390 #include <cds/intrusive/skip_list_rcu.h>
392 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
398 // Traits for your skip-list.
399 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
400 struct my_traits: public cds::intrusive::skip_list::traits
404 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
406 my_skiplist_set theSet;
412 // Apply RCU locking manually
413 typename rcu_type::scoped_lock sl;
415 for ( auto it = theSet.begin(); it != theSet.end(); ++it ) {
419 // rcu_type::scoped_lock destructor releases RCU lock implicitly
423 The iterator class supports the following minimalistic interface:
430 iterator( iterator const& s);
432 value_type * operator ->() const;
433 value_type& operator *() const;
436 iterator& operator ++();
439 iterator& operator = (const iterator& src);
441 bool operator ==(iterator const& i ) const;
442 bool operator !=(iterator const& i ) const;
445 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
449 You should incorporate skip_list::node into your struct \p T and provide
450 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
451 define a struct based on \p skip_list::traits.
453 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
455 // First, you should include the header for RCU type you have chosen
456 #include <cds/urcu/general_buffered.h>
458 // Include RCU skip-list specialization
459 #include <cds/intrusive/skip_list_rcu.h>
462 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
464 // Data stored in skip list
465 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
474 // my_data compare functor
476 int operator()( const my_data& d1, const my_data& d2 )
478 return d1.strKey.compare( d2.strKey );
481 int operator()( const my_data& d, const std::string& s )
483 return d.strKey.compare(s);
486 int operator()( const std::string& s, const my_data& d )
488 return s.compare( d.strKey );
493 struct my_traits: public cds::intrusive::skip_list::traits
495 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
496 typedef my_data_cmp compare;
499 // Declare skip-list set type
500 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
503 Equivalent option-based code:
505 #include <cds/urcu/general_buffered.h>
506 #include <cds/intrusive/skip_list_rcu.h>
508 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
517 // Declare option-based skip-list set
518 typedef cds::intrusive::SkipListSet< rcu_type
520 , typename cds::intrusive::skip_list::make_traits<
521 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
522 ,cds::intrusive::opt::compare< my_data_cmp >
// ---------------------------------------------------------------------------
// NOTE(review): incomplete extraction (fused line numbers, missing lines);
// comments annotate visible code only.
// ---------------------------------------------------------------------------
// SkipListSet specialization for the RCU garbage collector: class head,
// trait-derived typedefs, and internal helper machinery.
531 #ifdef CDS_DOXYGEN_INVOKED
532 ,typename Traits = skip_list::traits
537 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
540 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
541 typedef T value_type; ///< type of value stored in the skip-list
542 typedef Traits traits; ///< Traits template parameter
544 typedef typename traits::hook hook; ///< hook type
545 typedef typename hook::node_type node_type; ///< node type
547 # ifdef CDS_DOXYGEN_INVOKED
548 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
550 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
553 typedef typename traits::disposer disposer; ///< disposer
554 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
556 typedef typename traits::item_counter item_counter; ///< Item counting policy used
557 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
558 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
559 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
560 typedef typename traits::back_off back_off; ///< Back-off strategy
561 typedef typename traits::stat stat; ///< internal statistics type
562 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
563 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
564 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
567 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
569 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
570 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Clamp the generator's upper bound to the library-wide height limit,
// selected at compile time via std::conditional.
572 static unsigned int const c_nMaxHeight = std::conditional<
573 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
574 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
575 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
// Initial value for the estimated-height counter m_nHeight (see ctor below).
579 static unsigned int const c_nMinHeight = 5;
583 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
584 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
588 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
// Use the trait-provided node builder if given, else the intrusive default.
590 typedef typename std::conditional<
591 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
592 ,intrusive_node_builder
593 ,typename traits::internal_node_builder
594 >::type node_builder;
596 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Frees a value's tower via the node builder's disposer.
598 static void dispose_node( value_type * pVal )
602 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
// (node_disposer functor — adapts dispose_node to a callable object)
608 void operator()( value_type * pVal )
610 dispose_node( pVal );
// Retires a whole chain of deleted nodes through the RCU batch interface.
// The lambda is a generator: each call pops one node off pChain; a null
// retired_ptr (original 625) signals end-of-chain to gc::batch_retire.
614 static void dispose_chain( node_type * pChain )
// Must NOT hold the RCU lock — batch_retire may synchronize.
617 assert( !gc::is_locked() );
619 auto f = [&pChain]() -> cds::urcu::retired_ptr {
620 node_type * p = pChain;
622 pChain = p->m_pDelChain;
623 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
625 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
627 gc::batch_retire(std::ref(f));
// 'position' helper: per-level predecessor/successor arrays filled by the
// find_* functions, plus a thread-local chain of nodes awaiting disposal.
632 node_type * pPrev[ c_nMaxHeight ];
633 node_type * pSucc[ c_nMaxHeight ];
634 node_type * pNext[ c_nMaxHeight ];
637 node_type * pDelChain;
640 : pDelChain( nullptr )
// Destructor flushes the accumulated deletion chain to the RCU GC.
645 dispose_chain( pDelChain );
// Pushes an unlinked node onto the local deletion chain (freed later,
// outside the RCU critical section).
648 void dispose( node_type * p )
650 assert( p != nullptr );
651 assert( p->m_pDelChain == nullptr );
653 p->m_pDelChain = pDelChain;
658 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
662 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
664 item_counter m_ItemCounter; ///< item counter
665 random_level_generator m_RandomLevelGen; ///< random level generator instance
666 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
667 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
668 mutable stat m_Stat; ///< internal statistics
// Draws a node height in [1 .. generator upper bound].
672 unsigned int random_level()
674 // Random generator produces a number from range [0..31]
675 // We need a number from range [1..32]
676 return m_RandomLevelGen() + 1;
// Allocates the tower for a new node using the random level generator.
679 template <typename Q>
680 node_type * build_node( Q v )
682 return node_builder::make_tower( v, m_RandomLevelGen );
687 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
// Disposer used by raw_ptr: releases a whole deletion chain at once.
691 struct chain_disposer {
692 void operator()( node_type * pChain ) const
694 dispose_chain( pChain );
697 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
701 /// Result of \p get(), \p get_with() functions - pointer to the node found
702 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
// Bit 1 of the level-0 mark means "extracted" (see node's bit layout).
707 bool is_extracted( marked_node_ptr const p ) const
709 return (p.bits() & 2) != 0;
// ---------------------------------------------------------------------------
// NOTE(review): incomplete extraction (fused line numbers, missing lines);
// comments annotate visible code only.
// ---------------------------------------------------------------------------
// find_position: descends the tower from the current max level, recording in
// 'pos' the predecessor/successor pair at every level, and helping to
// physically unlink logically-deleted nodes it passes. Must be called under
// the RCU lock. Returns true iff a node equal to 'val' was found (pos.pCur).
712 template <typename Q, typename Compare >
713 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
715 assert( gc::is_locked() );
718 marked_node_ptr pSucc;
719 marked_node_ptr pCur;
// Search restarts from the head tower (retry loop not fully visible).
723 pPred = m_Head.head();
725 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
728 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
730 // pCur.bits() means that pPred is logically deleted
734 if ( pCur.ptr() == nullptr ) {
735 // end of the list at level nLevel - goto next level
739 // pSucc contains deletion mark for pCur
740 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-validate the link: if pPred no longer points to pCur, retry.
742 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
745 if ( pSucc.bits() ) {
746 // pCur is marked, i.e. logically deleted.
747 marked_node_ptr p( pCur.ptr() );
750 pCur->m_bUnlinked = true;
// Help unlink: swing pPred's link past the deleted node.
752 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
753 memory_model::memory_order_release, atomics::memory_order_relaxed ))
// Extracted nodes (bit 1 set) are owned by the extractor; only
// plain-erased nodes go onto the local disposal chain.
756 if ( !is_extracted( pSucc )) {
757 // We cannot free the node at this moment since RCU is locked
758 // Link deleted nodes to a chain to free later
759 pos.dispose( pCur.ptr() );
760 m_Stat.onEraseWhileFind();
763 m_Stat.onExtractWhileFind();
// Key comparison drives horizontal movement at this level.
770 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
773 else if ( nCmp == 0 && bStopIfFound )
// Record the insert/remove position at this level before descending.
781 pos.pPrev[ nLevel ] = pPred;
782 pos.pSucc[ nLevel ] = pCur.ptr();
789 pos.pCur = pCur.ptr();
790 return pCur.ptr() && nCmp == 0;
// find_min_position: like find_position but always stops at the first node
// of each level (the minimum). The head tower itself can never be deleted.
793 bool find_min_position( position& pos )
795 assert( gc::is_locked() );
798 marked_node_ptr pSucc;
799 marked_node_ptr pCur;
802 pPred = m_Head.head();
804 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
806 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
807 // pCur.bits() means that pPred is logically deleted
808 // head cannot be deleted
809 assert( pCur.bits() == 0 );
813 // pSucc contains deletion mark for pCur
814 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
816 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
819 if ( pSucc.bits() ) {
820 // pCur is marked, i.e. logically deleted.
823 pCur->m_bUnlinked = true;
825 marked_node_ptr p( pCur.ptr() );
826 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
827 memory_model::memory_order_release, atomics::memory_order_relaxed ))
830 if ( !is_extracted( pSucc )) {
831 // We cannot free the node at this moment since RCU is locked
832 // Link deleted nodes to a chain to free later
833 pos.dispose( pCur.ptr() );
834 m_Stat.onEraseWhileFind();
837 m_Stat.onExtractWhileFind();
846 pos.pPrev[ nLevel ] = pPred;
847 pos.pSucc[ nLevel ] = pCur.ptr();
// Returns true iff the list is non-empty (a minimum node exists).
849 return (pos.pCur = pCur.ptr()) != nullptr;
// find_max_position: walks each level to its end to locate the rightmost
// (maximum) node, with the same helping-unlink protocol as above.
852 bool find_max_position( position& pos )
854 assert( gc::is_locked() );
857 marked_node_ptr pSucc;
858 marked_node_ptr pCur;
861 pPred = m_Head.head();
863 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
866 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
868 // pCur.bits() means that pPred is logically deleted
872 if ( pCur.ptr() == nullptr ) {
873 // end of the list at level nLevel - goto next level
877 // pSucc contains deletion mark for pCur
878 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
880 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
883 if ( pSucc.bits() ) {
884 // pCur is marked, i.e. logically deleted.
887 pCur->m_bUnlinked = true;
889 marked_node_ptr p( pCur.ptr() );
890 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
891 memory_model::memory_order_release, atomics::memory_order_relaxed ))
894 if ( !is_extracted( pSucc )) {
895 // We cannot free the node at this moment since RCU is locked
896 // Link deleted nodes to a chain to free later
897 pos.dispose( pCur.ptr() );
898 m_Stat.onEraseWhileFind();
901 m_Stat.onExtractWhileFind();
916 pos.pPrev[ nLevel ] = pPred;
917 pos.pSucc[ nLevel ] = pCur.ptr();
920 return (pos.pCur = pCur.ptr()) != nullptr;
// ---------------------------------------------------------------------------
// NOTE(review): incomplete extraction (fused line numbers, missing lines);
// comments annotate visible code only.
// ---------------------------------------------------------------------------
// insert_at_position: splices pNode into the list at the position captured in
// 'pos'. Level 0 is linked first (that CAS makes the node logically present);
// upper levels are linked afterwards, re-finding the position on contention.
// Must be called under the RCU lock.
923 template <typename Func>
924 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
926 assert( gc::is_locked() );
928 unsigned int nHeight = pNode->height();
929 pNode->clear_tower();
// Link at level 0: publish pNode->next, then CAS the predecessor's link.
// Failure means the position is stale (retry path not fully visible).
932 marked_node_ptr p( pos.pSucc[0] );
933 pNode->next( 0 ).store( p, memory_model::memory_order_release );
934 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
938 pNode->m_bLinked = true;
// Link the remaining levels 1 .. nHeight-1.
943 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
946 marked_node_ptr q( pos.pSucc[ nLevel ]);
// CAS on pNode's own link first: if it fails, pNode acquired a deletion
// mark concurrently, so stop linking upper levels.
947 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
948 // pNode has been marked as removed while we are inserting it
951 m_Stat.onLogicDeleteWhileInsert();
955 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
958 // Renew insert position
959 m_Stat.onRenewInsertPosition();
960 if ( !find_position( val, pos, key_comparator(), false )) {
961 // The node has been deleted while we are inserting it
962 m_Stat.onNotFoundWhileInsert();
// try_remove_at: two-phase removal of pDel. Phase 1 marks every level from
// the top down (logical deletion); the level-0 mark decides ownership:
// mask 1 = erase (node goes to pos' disposal chain), mask 3 = extract
// (caller keeps the value). Phase 2 physically unlinks via pos.pPrev[].
970 template <typename Func>
971 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
973 assert( pDel != nullptr );
974 assert( gc::is_locked() );
976 marked_node_ptr pSucc;
978 // logical deletion (marking)
979 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
980 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
983 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
990 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
// The level-0 CAS is the linearization point; only the winner calls f.
995 int const nMask = bExtract ? 3 : 1;
996 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
998 f( *node_traits::to_value_ptr( pDel ));
1000 // physical deletion
// Fast path: unlink directly through the recorded predecessors; on any
// CAS failure fall back to find_position, which performs helping unlink.
1003 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
1004 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
1005 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
1006 memory_model::memory_order_release, atomics::memory_order_relaxed) )
1009 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
1011 m_Stat.onSlowExtract();
1013 m_Stat.onSlowErase();
1015 assert( pDel->m_bUnlinked );
1022 pDel->m_bUnlinked = true;
1025 // We cannot free the node at this moment since RCU is locked
1026 // Link deleted nodes to a chain to free later
1027 pos.dispose( pDel );
1028 m_Stat.onFastErase();
1031 m_Stat.onFastExtract();
1034 m_Stat.onEraseRetry();
// Result of the lock-free read-only fast-path search.
// NOTE(review): 'finsd_fastpath_result' is a typo for 'find_fastpath_result'
// in the upstream source; kept as-is here since the using declaration at
// original 1044 depends on the same spelling.
1038 enum finsd_fastpath_result {
1039 find_fastpath_found,
1040 find_fastpath_not_found,
// find_fastpath: wait-light search that never modifies the list. It starts
// at the current estimated height and aborts (slow path takes over) if it
// keeps meeting marked nodes after a bounded spin.
1043 template <typename Q, typename Compare, typename Func>
1044 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1047 marked_node_ptr pCur;
1048 marked_node_ptr pSucc;
1049 marked_node_ptr pNull;
1053 pPred = m_Head.head();
1054 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1055 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1056 if ( pCur == pNull )
1059 while ( pCur != pNull ) {
1060 if ( pCur.bits() ) {
1061 // Wait until pCur is removed
// Bounded spin (16 attempts, with back-off not fully visible here).
1062 unsigned int nAttempt = 0;
1063 while ( pCur.bits() && nAttempt++ < 16 ) {
1065 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1069 if ( pCur.bits() ) {
1070 // Maybe, we are on deleted node sequence
1071 // Abort searching, try slow-path
1072 return find_fastpath_abort;
1077 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1080 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1082 else if ( nCmp == 0 ) {
// Found: apply the user functor and report success.
1084 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1085 return find_fastpath_found;
1087 else // pCur > val - go down
1093 return find_fastpath_not_found;
// ---------------------------------------------------------------------------
// NOTE(review): incomplete extraction (fused line numbers, missing lines);
// comments annotate visible code only.
// ---------------------------------------------------------------------------
// find_slowpath: full positional search (with helping unlink); applies f to
// the found value.
1096 template <typename Q, typename Compare, typename Func>
1097 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1099 if ( find_position( val, pos, cmp, true )) {
1100 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1102 f( *node_traits::to_value_ptr( pos.pCur ), val );
// do_find_with: convenience overload that supplies its own position.
1109 template <typename Q, typename Compare, typename Func>
1110 bool do_find_with( Q& val, Compare cmp, Func f )
1113 return do_find_with( val, cmp, f, pos );
// do_find_with: tries the read-only fast path first; only a fast-path abort
// (deleted-node sequence) falls through to the slow path.
1116 template <typename Q, typename Compare, typename Func>
1117 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1124 switch ( find_fastpath( val, cmp, f )) {
1125 case find_fastpath_found:
1126 m_Stat.onFindFastSuccess();
1128 case find_fastpath_not_found:
1129 m_Stat.onFindFastFailed();
1135 if ( find_slowpath( val, cmp, f, pos )) {
1136 m_Stat.onFindSlowSuccess();
1140 m_Stat.onFindSlowFailed();
// do_erase: find + try_remove_at with the erase mask; f is applied to the
// removed value by try_remove_at. Checks the RCU deadlock policy because
// removal may need to synchronize.
1147 template <typename Q, typename Compare, typename Func>
1148 bool do_erase( Q const& val, Compare cmp, Func f )
1150 check_deadlock_policy::check();
1158 if ( !find_position( val, pos, cmp, false ) ) {
1159 m_Stat.onEraseFailed();
1163 node_type * pDel = pos.pCur;
1164 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1166 unsigned int nHeight = pDel->height();
1167 if ( try_remove_at( pDel, pos, f, false )) {
1169 m_Stat.onRemoveNode( nHeight );
1170 m_Stat.onEraseSuccess();
1174 m_Stat.onEraseFailed();
// do_extract_key: removes the matching node in 'extract' mode (bExtract =
// true) so ownership passes to the caller instead of the disposal chain.
// Caller must already hold the RCU lock (unlike do_erase).
1183 template <typename Q, typename Compare>
1184 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1186 // RCU should be locked!!!
1187 assert( gc::is_locked() );
1191 if ( !find_position( key, pos, cmp, false ) ) {
1192 m_Stat.onExtractFailed();
1197 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1199 unsigned int const nHeight = pDel->height();
// No-op functor: extract returns the value rather than transforming it.
1201 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1203 m_Stat.onRemoveNode( nHeight );
1204 m_Stat.onExtractSuccess();
1207 m_Stat.onExtractFailed();
1212 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// do_extract: wraps do_extract_key with the default comparator (RCU lock
// scope not fully visible here).
1215 template <typename Q>
1216 value_type * do_extract( Q const& key )
1218 check_deadlock_policy::check();
1219 value_type * pDel = nullptr;
1223 pDel = do_extract_key( key, key_comparator(), pos );
// do_extract_with: same, with a comparator built from a Less predicate.
1229 template <typename Q, typename Less>
1230 value_type * do_extract_with( Q const& key, Less pred )
1233 check_deadlock_policy::check();
1234 value_type * pDel = nullptr;
1238 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// do_extract_min / do_extract_max: extract the leftmost / rightmost element
// found by find_min_position / find_max_position.
1244 value_type * do_extract_min()
1246 assert( !gc::is_locked() );
1254 if ( !find_min_position( pos ) ) {
1255 m_Stat.onExtractMinFailed();
1260 unsigned int const nHeight = pDel->height();
1262 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1264 m_Stat.onRemoveNode( nHeight );
1265 m_Stat.onExtractMinSuccess();
1268 m_Stat.onExtractMinFailed();
1274 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1277 value_type * do_extract_max()
1279 assert( !gc::is_locked() );
1287 if ( !find_max_position( pos ) ) {
1288 m_Stat.onExtractMaxFailed();
1293 unsigned int const nHeight = pDel->height();
1295 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1297 m_Stat.onRemoveNode( nHeight );
1298 m_Stat.onExtractMaxSuccess();
1301 m_Stat.onExtractMaxFailed();
1307 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1310 void increase_height( unsigned int nHeight )
1312 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1313 if ( nCur < nHeight )
1314 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
1319 /// Default constructor
1321 : m_Head( c_nMaxHeight )
1322 , m_nHeight( c_nMinHeight )
1323 , m_pDeferredDelChain( nullptr )
1325 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1327 // Barrier for head node
1328 atomics::atomic_thread_fence( memory_model::memory_order_release );
1331 /// Clears and destructs the skip-list
// Forward iterator over bottom-level (level 0) links.
// NOTE(review): iterator thread-safety rules are not visible here;
// presumably iteration must happen under RCU lock — confirm against
// the iterator documentation.
1339 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1341 /// Const iterator type
1342 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1344 /// Returns a forward iterator addressing the first element in a set
1347 return iterator( *m_Head.head() );
1350 /// Returns a forward const iterator addressing the first element in a set
1351 const_iterator begin() const
1353 return const_iterator( *m_Head.head() );
1356 /// Returns a forward const iterator addressing the first element in a set
1357 const_iterator cbegin() const
1359 return const_iterator( *m_Head.head() );
1362 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
// Default-constructed iterator acts as the past-the-end sentinel.
1368 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1369 const_iterator end() const
1371 return const_iterator();
1374 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1375 const_iterator cend() const
1377 return const_iterator();
1381 /// Inserts new node
1383 The function inserts \p val in the set if it does not contain
1384 an item with key equal to \p val.
1386 The function applies RCU lock internally.
1388 Returns \p true if \p val is placed into the set, \p false otherwise.
// Convenience overload: delegates to insert(val, f) with a no-op functor.
1390 bool insert( value_type& val )
1392 return insert( val, []( value_type& ) {} );
1395 /// Inserts new node
1397 This function is intended for derived non-intrusive containers.
1399 The function allows to split creating of new item into two part:
1400 - create item with key only
1401 - insert new item into the set
1402 - if inserting is success, calls \p f functor to initialize value-field of \p val.
1404 The functor signature is:
1406 void func( value_type& val );
1408 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1409 \p val no any other changes could be made on this set's item by concurrent threads.
1410 The user-defined functor is called only if the inserting is success.
1412 RCU \p synchronize method can be called. RCU should not be locked.
1414 template <typename Func>
1415 bool insert( value_type& val, Func f )
// Fail fast if the caller holds the RCU lock while a policy forbids it
// (synchronize() inside would deadlock).
1417 check_deadlock_policy::check();
1423 node_type * pNode = node_traits::to_node_ptr( val );
// scp frees the node tower on scope exit unless ownership is released
// after a successful insert.
1424 scoped_node_ptr scp( pNode );
1425 unsigned int nHeight = pNode->height();
// A tower may already exist if a previous insert attempt built one.
1426 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1427 bool bTowerMade = false;
1433 bool bFound = find_position( val, pos, key_comparator(), true );
1435 // scoped_node_ptr deletes the node tower if we create it
1439 m_Stat.onInsertFailed();
// Key not present: build the tower lazily, only once it is needed.
1445 build_node( pNode );
1446 nHeight = pNode->height();
// Linking may fail under contention; retry logic is on hidden lines.
1451 if ( !insert_at_position( val, pNode, pos, f )) {
1452 m_Stat.onInsertRetry();
1456 increase_height( nHeight );
1458 m_Stat.onAddNode( nHeight );
1459 m_Stat.onInsertSuccess();
1469 /// Updates the node
1471 The operation performs inserting or changing data with lock-free manner.
1473 If the item \p val is not found in the set, then \p val is inserted into the set
1474 iff \p bInsert is \p true.
1475 Otherwise, the functor \p func is called with item found.
1476 The functor signature is:
1478 void func( bool bNew, value_type& item, value_type& val );
1481 - \p bNew - \p true if the item has been inserted, \p false otherwise
1482 - \p item - item of the set
1483 - \p val - argument \p val passed into the \p %update() function
1484 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1485 refer to the same thing.
1487 The functor can change non-key fields of the \p item; however, \p func must guarantee
1488 that during changing no any other modifications could be made on this item by concurrent threads.
1490 RCU \p synchronize method can be called. RCU should not be locked.
1492 Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
1493 i.e. the node has been inserted or updated,
1494 \p second is \p true if new item has been added or \p false if the item with \p key
1497 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1499 template <typename Func>
1500 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
// Fail fast if the caller holds the RCU lock while a policy forbids it.
1502 check_deadlock_policy::check();
// Default result: success, no new item inserted.
1505 std::pair<bool, bool> bRet( true, false );
1508 node_type * pNode = node_traits::to_node_ptr( val );
// scp frees the node tower on scope exit unless ownership is released.
1509 scoped_node_ptr scp( pNode );
1510 unsigned int nHeight = pNode->height();
1511 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1512 bool bTowerMade = false;
1517 bool bFound = find_position( val, pos, key_comparator(), true );
1519 // scoped_node_ptr deletes the node tower if we create it before
// Existing item found: update path — invoke functor with bNew == false.
1523 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1524 m_Stat.onUpdateExist();
// Insert path: build the tower lazily, as in insert().
1535 build_node( pNode );
1536 nHeight = pNode->height();
// On insertion, item and val are the same object, hence func(true, item, item).
1541 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1542 m_Stat.onInsertRetry();
1546 increase_height( nHeight );
1549 m_Stat.onAddNode( nHeight );
1550 m_Stat.onUpdateNew();
// Deprecated alias kept for backward compatibility; forwards to update().
1559 template <typename Func>
1560 CDS_DEPRECATED("ensure() is deprecated, use update()")
1561 std::pair<bool, bool> ensure( value_type& val, Func func )
1563 return update( val, func, true );
1567 /// Unlinks the item \p val from the set
1569 The function searches the item \p val in the set and unlink it from the set
1570 if it is found and is equal to \p val.
1572 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1573 and deletes the item found. \p %unlink() searches an item by key and deletes it
1574 only if \p val is an item of that set, i.e. the pointer to item found
1575 is equal to <tt> &val </tt>.
1577 RCU \p synchronize method can be called. RCU should not be locked.
1579 The \ref disposer specified in \p Traits class template parameter is called
1580 by garbage collector \p GC asynchronously.
1582 The function returns \p true if success and \p false otherwise.
1584 bool unlink( value_type& val )
// Fail fast if the caller holds the RCU lock while a policy forbids it.
1586 check_deadlock_policy::check();
1594 if ( !find_position( val, pos, key_comparator(), false ) ) {
1595 m_Stat.onUnlinkFailed();
1599 node_type * pDel = pos.pCur;
// find_position found a node with an equal key; verify that invariant.
1600 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1602 unsigned int nHeight = pDel->height();
// Pointer-identity check: unlink only if the found item IS &val,
// not merely key-equal to it.
1604 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1606 m_Stat.onRemoveNode( nHeight );
1607 m_Stat.onUnlinkSuccess();
1611 m_Stat.onUnlinkFailed();
1620 /// Extracts the item from the set with specified \p key
1621 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1622 The function searches an item with key equal to \p key in the set,
1623 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1624 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1626 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1628 RCU \p synchronize method can be called. RCU should NOT be locked.
1629 The function does not call the disposer for the item found.
1630 The disposer will be implicitly invoked when the returned object is destroyed or when
1631 its \p release() member function is called.
1634 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1638 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1643 // Dispose returned item.
1648 template <typename Q>
1649 exempt_ptr extract( Q const& key )
// Thin wrapper: do_extract() performs the search/unlink; exempt_ptr
// takes ownership of the unlinked node.
1651 return exempt_ptr( do_extract( key ));
1654 /// Extracts the item from the set with comparing functor \p pred
1656 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1657 \p Less has the interface like \p std::less.
1658 \p pred must imply the same element order as the comparator used for building the set.
1660 template <typename Q, typename Less>
1661 exempt_ptr extract_with( Q const& key, Less pred )
1663 return exempt_ptr( do_extract_with( key, pred ));
1666 /// Extracts an item with minimal key from the list
1668 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1669 If the skip-list is empty the function returns an empty \p exempt_ptr.
1671 RCU \p synchronize method can be called. RCU should NOT be locked.
1672 The function does not call the disposer for the item found.
1673 The disposer will be implicitly invoked when the returned object is destroyed or when
1674 its \p release() member function is manually called.
1677 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1681 typename skip_list::exempt_ptr ep(theList.extract_min());
1686 // Dispose returned item.
1691 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1692 It means that the function gets leftmost item and tries to unlink it.
1693 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1694 So, the function returns the item with minimum key at the moment of list traversing.
1696 exempt_ptr extract_min()
1698 return exempt_ptr( do_extract_min());
1701 /// Extracts an item with maximal key from the list
1703 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1704 If the skip-list is empty the function returns an empty \p exempt_ptr.
1706 RCU \p synchronize method can be called. RCU should NOT be locked.
1707 The function does not call the disposer for the item found.
1708 The disposer will be implicitly invoked when the returned object is destroyed or when
1709 its \p release() member function is manually called.
1712 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1716 typename skip_list::exempt_ptr ep( theList.extract_max() );
1720 // Dispose returned item.
1725 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1726 It means that the function gets rightmost item and tries to unlink it.
1727 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1728 So, the function returns the item with maximum key at the moment of list traversing.
1730 exempt_ptr extract_max()
1732 return exempt_ptr( do_extract_max());
1735 /// Deletes the item from the set
1736 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1737 The function searches an item with key equal to \p key in the set,
1738 unlinks it from the set, and returns \p true.
1739 If the item with key equal to \p key is not found the function return \p false.
1741 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1743 RCU \p synchronize method can be called. RCU should not be locked.
1745 template <typename Q>
1746 bool erase( const Q& key )
// No-op functor: nothing extra to do with the found item before unlink.
1748 return do_erase( key, key_comparator(), [](value_type const&) {} );
1751 /// Delete the item from the set with comparing functor \p pred
1753 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1754 but \p pred predicate is used for key comparing.
1755 \p Less has the interface like \p std::less.
1756 \p pred must imply the same element order as the comparator used for building the set.
1758 template <typename Q, typename Less>
1759 bool erase_with( const Q& key, Less pred )
// \p pred is only used for its type: a comparator is synthesized from Less.
1762 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1765 /// Deletes the item from the set
1766 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1767 The function searches an item with key equal to \p key in the set,
1768 call \p f functor with item found, unlinks it from the set, and returns \p true.
1769 The \ref disposer specified in \p Traits class template parameter is called
1770 by garbage collector \p GC asynchronously.
1772 The \p Func interface is
1775 void operator()( value_type const& item );
1778 If the item with key equal to \p key is not found the function return \p false.
1780 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1782 RCU \p synchronize method can be called. RCU should not be locked.
1784 template <typename Q, typename Func>
1785 bool erase( Q const& key, Func f )
1787 return do_erase( key, key_comparator(), f );
1790 /// Delete the item from the set with comparing functor \p pred
1792 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1793 but \p pred predicate is used for key comparing.
1794 \p Less has the interface like \p std::less.
1795 \p pred must imply the same element order as the comparator used for building the set.
1797 template <typename Q, typename Less, typename Func>
1798 bool erase_with( Q const& key, Less pred, Func f )
1801 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1805 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1806 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1807 The interface of \p Func functor is:
1810 void operator()( value_type& item, Q& key );
1813 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1815 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1816 that \p item cannot be disposed during functor is executing.
1817 The functor does not serialize simultaneous access to the set \p item. If such access is
1818 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1820 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1821 can modify both arguments.
1823 The function applies RCU lock internally.
1825 The function returns \p true if \p key is found, \p false otherwise.
1827 template <typename Q, typename Func>
1828 bool find( Q& key, Func f )
1830 return do_find_with( key, key_comparator(), f );
// Const-key overload of find(Q&, Func).
1833 template <typename Q, typename Func>
1834 bool find( Q const& key, Func f )
1836 return do_find_with( key, key_comparator(), f );
1840 /// Finds the key \p key with comparing functor \p pred
1842 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1843 but \p cmp is used for key comparison.
1844 \p Less functor has the interface like \p std::less.
1845 \p cmp must imply the same element order as the comparator used for building the set.
1847 template <typename Q, typename Less, typename Func>
1848 bool find_with( Q& key, Less pred, Func f )
// \p pred is only used for its type: a comparator is synthesized from Less.
1851 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1854 template <typename Q, typename Less, typename Func>
1855 bool find_with( Q const& key, Less pred, Func f )
1858 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1862 /// Checks whether the set contains \p key
1864 The function searches the item with key equal to \p key
1865 and returns \p true if it is found, and \p false otherwise.
1867 The function applies RCU lock internally.
1869 template <typename Q>
1870 bool contains( Q const& key )
// Presence check only: the functor does nothing with the found item.
1872 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
// Deprecated alias for contains(key).
1875 template <typename Q>
1876 CDS_DEPRECATED("deprecated, use contains()")
1877 bool find( Q const& key )
1879 return contains( key );
1883 /// Checks whether the set contains \p key using \p pred predicate for searching
1885 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1886 \p Less functor has the interface like \p std::less.
1887 \p Less must imply the same element order as the comparator used for building the set.
1889 template <typename Q, typename Less>
1890 bool contains( Q const& key, Less pred )
1893 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
// Deprecated alias for contains(key, pred).
1896 template <typename Q, typename Less>
1897 CDS_DEPRECATED("deprecated, use contains()")
1898 bool find_with( Q const& key, Less pred )
1900 return contains( key, pred );
1904 /// Finds \p key and return the item found
1905 /** \anchor cds_intrusive_SkipListSet_rcu_get
1906 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1907 If \p key is not found it returns empty \p raw_ptr.
1909 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1911 RCU should be locked before call of this function.
1912 Returned item is valid only while RCU is locked:
1914 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1917 typename skip_list::raw_ptr pVal;
1920 skip_list::rcu_lock lock;
1922 pVal = theList.get( 5 );
1928 // You can manually release pVal after RCU-locked section
1932 template <typename Q>
1933 raw_ptr get( Q const& key )
// Unlike extract(), get() requires the caller to already hold the RCU lock.
1935 assert( gc::is_locked());
// NOTE(review): pFound is left uninitialized here but get_with() below
// initializes it to nullptr. Benign — it is read only when do_find_with()
// returned true, in which case the functor has set it — but initializing
// would be more consistent.
1938 value_type * pFound;
1939 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1940 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: return an empty raw_ptr that still carries the position disposer.
1941 return raw_ptr( raw_ptr_disposer( pos ));
1944 /// Finds \p key and return the item found
1946 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1947 but \p pred is used for comparing the keys.
1949 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1951 \p pred must imply the same element order as the comparator used for building the set.
1953 template <typename Q, typename Less>
1954 raw_ptr get_with( Q const& key, Less pred )
1957 assert( gc::is_locked());
1959 value_type * pFound = nullptr;
// \p pred is only used for its type: a comparator is synthesized from Less.
1961 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1962 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1964 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1966 return raw_ptr( raw_ptr_disposer( pos ));
1969 /// Returns item count in the set
1971 The value returned depends on item counter type provided by \p Traits template parameter.
1972 For \p atomicity::empty_item_counter the function always returns 0.
1973 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1974 member function for this purpose.
1978 return m_ItemCounter;
1981 /// Checks if the set is empty
// The list is empty iff the head node has no successor at level 0.
1984 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1987 /// Clears the set (not atomic)
1989 The function unlink all items from the set.
1990 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1994 assert( set.empty() );
1996 the assertion could be raised.
1998 For each item the \p disposer will be called automatically after unlinking.
// Drain by repeatedly extracting the minimum until the list is empty;
// each extracted exempt_ptr disposes its item on destruction.
2003 while ( (ep = extract_min()) );
2006 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
2007 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
2009 return c_nMaxHeight;
2012 /// Returns const reference to internal statistics
2013 stat const& statistics() const
2019 }} // namespace cds::intrusive
2022 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H